
KVM: vmx: write HOST_IA32_EFER in vmx_set_constant_host_state()
[uclinux-h8/linux.git] arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/asm.h>
42 #include <asm/cpu.h>
43 #include <asm/io.h>
44 #include <asm/desc.h>
45 #include <asm/vmx.h>
46 #include <asm/virtext.h>
47 #include <asm/mce.h>
48 #include <asm/fpu/internal.h>
49 #include <asm/perf_event.h>
50 #include <asm/debugreg.h>
51 #include <asm/kexec.h>
52 #include <asm/apic.h>
53 #include <asm/irq_remapping.h>
54 #include <asm/mmu_context.h>
55 #include <asm/spec-ctrl.h>
56 #include <asm/mshyperv.h>
57
58 #include "trace.h"
59 #include "pmu.h"
60 #include "vmx_evmcs.h"
61
62 #define __ex(x) __kvm_handle_fault_on_reboot(x)
63 #define __ex_clear(x, reg) \
64         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
65
66 MODULE_AUTHOR("Qumranet");
67 MODULE_LICENSE("GPL");
68
69 static const struct x86_cpu_id vmx_cpu_id[] = {
70         X86_FEATURE_MATCH(X86_FEATURE_VMX),
71         {}
72 };
73 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
74
75 static bool __read_mostly enable_vpid = 1;
76 module_param_named(vpid, enable_vpid, bool, 0444);
77
78 static bool __read_mostly enable_vnmi = 1;
79 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
80
81 static bool __read_mostly flexpriority_enabled = 1;
82 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
83
84 static bool __read_mostly enable_ept = 1;
85 module_param_named(ept, enable_ept, bool, S_IRUGO);
86
87 static bool __read_mostly enable_unrestricted_guest = 1;
88 module_param_named(unrestricted_guest,
89                         enable_unrestricted_guest, bool, S_IRUGO);
90
91 static bool __read_mostly enable_ept_ad_bits = 1;
92 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
93
94 static bool __read_mostly emulate_invalid_guest_state = true;
95 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
96
97 static bool __read_mostly fasteoi = 1;
98 module_param(fasteoi, bool, S_IRUGO);
99
100 static bool __read_mostly enable_apicv = 1;
101 module_param(enable_apicv, bool, S_IRUGO);
102
103 static bool __read_mostly enable_shadow_vmcs = 1;
104 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
105 /*
106  * If nested=1, nested virtualization is supported, i.e., a guest may use
107  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
108  * use VMX instructions.
109  */
110 static bool __read_mostly nested = 0;
111 module_param(nested, bool, S_IRUGO);
112
113 static u64 __read_mostly host_xss;
114
115 static bool __read_mostly enable_pml = 1;
116 module_param_named(pml, enable_pml, bool, S_IRUGO);
117
118 #define MSR_TYPE_R      1
119 #define MSR_TYPE_W      2
120 #define MSR_TYPE_RW     3
121
122 #define MSR_BITMAP_MODE_X2APIC          1
123 #define MSR_BITMAP_MODE_X2APIC_APICV    2
124
125 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
126
127 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
128 static int __read_mostly cpu_preemption_timer_multi;
129 static bool __read_mostly enable_preemption_timer = 1;
130 #ifdef CONFIG_X86_64
131 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
132 #endif
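
/*
 * Editor's illustration (not part of the original file; assumes, as labelled
 * above, that cpu_preemption_timer_multi holds the rate field of
 * MSR_IA32_VMX_MISC): the VMX preemption timer counts down once every
 * 2^cpu_preemption_timer_multi TSC cycles, so a host TSC delta converts to
 * timer ticks with a right shift.  The helper name is hypothetical.
 */
static inline u64 example_tsc_to_preemption_timer_ticks(u64 delta_tsc)
{
	/* one timer tick per 2^cpu_preemption_timer_multi TSC cycles */
	return delta_tsc >> cpu_preemption_timer_multi;
}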
133
134 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
135 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
136 #define KVM_VM_CR0_ALWAYS_ON                            \
137         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
138          X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
139 #define KVM_CR4_GUEST_OWNED_BITS                                      \
140         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
141          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
142
143 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
144 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
145 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
146
147 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
148
149 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
150
151 /*
152  * Hyper-V requires all of these, so mark them as supported even though
153  * they are just treated the same as all-context.
154  */
155 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
156         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
157         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
158         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
159         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
160
161 /*
162  * These two parameters are used to configure the controls for Pause-Loop Exiting:
163  * ple_gap:    upper bound on the amount of time between two successive
164  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
165  *             Testing shows this time is usually smaller than 128 cycles.
166  * ple_window: upper bound on the amount of time a guest is allowed to execute
167  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
168  *             less than 2^12 cycles.
169  * Time is measured based on a counter that runs at the same rate as the TSC,
170  * see SDM volume 3B, sections 21.6.13 & 22.1.3.
171  */
172 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
173
174 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
175 module_param(ple_window, uint, 0444);
176
177 /* Default doubles per-vcpu window every exit. */
178 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
179 module_param(ple_window_grow, uint, 0444);
180
181 /* Default resets per-vcpu window every exit to ple_window. */
182 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
183 module_param(ple_window_shrink, uint, 0444);
184
185 /* Default is to compute the maximum so we can never overflow. */
186 static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
187 module_param(ple_window_max, uint, 0444);
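
/*
 * Illustrative sketch only (editor's addition, hypothetical helper, not the
 * file's implementation): the dynamic per-vCPU window grows multiplicatively
 * on a PLE exit and is clamped between ple_window and ple_window_max,
 * matching the behaviour the comments above describe (doubling by default,
 * never overflowing past the maximum).
 */
static inline unsigned int example_grow_ple_window(unsigned int val)
{
	return clamp(val * ple_window_grow, ple_window, ple_window_max);
}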
188
189 extern const ulong vmx_return;
190
191 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
192 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
193 static DEFINE_MUTEX(vmx_l1d_flush_mutex);
194
195 /* Storage for pre module init parameter parsing */
196 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
197
198 static const struct {
199         const char *option;
200         bool for_parse;
201 } vmentry_l1d_param[] = {
202         [VMENTER_L1D_FLUSH_AUTO]         = {"auto", true},
203         [VMENTER_L1D_FLUSH_NEVER]        = {"never", true},
204         [VMENTER_L1D_FLUSH_COND]         = {"cond", true},
205         [VMENTER_L1D_FLUSH_ALWAYS]       = {"always", true},
206         [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
207         [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
208 };
209
210 #define L1D_CACHE_ORDER 4
211 static void *vmx_l1d_flush_pages;
212
213 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
214 {
215         struct page *page;
216         unsigned int i;
217
218         if (!enable_ept) {
219                 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
220                 return 0;
221         }
222
223         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
224                 u64 msr;
225
226                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
227                 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
228                         l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
229                         return 0;
230                 }
231         }
232
233         /* If set to auto, use the default l1tf mitigation method */
234         if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
235                 switch (l1tf_mitigation) {
236                 case L1TF_MITIGATION_OFF:
237                         l1tf = VMENTER_L1D_FLUSH_NEVER;
238                         break;
239                 case L1TF_MITIGATION_FLUSH_NOWARN:
240                 case L1TF_MITIGATION_FLUSH:
241                 case L1TF_MITIGATION_FLUSH_NOSMT:
242                         l1tf = VMENTER_L1D_FLUSH_COND;
243                         break;
244                 case L1TF_MITIGATION_FULL:
245                 case L1TF_MITIGATION_FULL_FORCE:
246                         l1tf = VMENTER_L1D_FLUSH_ALWAYS;
247                         break;
248                 }
249         } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
250                 l1tf = VMENTER_L1D_FLUSH_ALWAYS;
251         }
252
253         if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
254             !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
255                 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
256                 if (!page)
257                         return -ENOMEM;
258                 vmx_l1d_flush_pages = page_address(page);
259
260                 /*
261                  * Initialize each page with a different pattern in
262                  * order to protect against KSM in the nested
263                  * virtualization case.
264                  */
265                 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
266                         memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
267                                PAGE_SIZE);
268                 }
269         }
270
271         l1tf_vmx_mitigation = l1tf;
272
273         if (l1tf != VMENTER_L1D_FLUSH_NEVER)
274                 static_branch_enable(&vmx_l1d_should_flush);
275         else
276                 static_branch_disable(&vmx_l1d_should_flush);
277
278         if (l1tf == VMENTER_L1D_FLUSH_COND)
279                 static_branch_enable(&vmx_l1d_flush_cond);
280         else
281                 static_branch_disable(&vmx_l1d_flush_cond);
282         return 0;
283 }
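
/*
 * Editor's note (assumption, based on behaviour outside this excerpt): when a
 * flush is actually required at VM-entry, CPUs with X86_FEATURE_FLUSH_L1D
 * write MSR_IA32_FLUSH_CMD directly; otherwise the flush is emulated by
 * reading through the vmx_l1d_flush_pages buffer allocated above
 * (2^L1D_CACHE_ORDER = 16 pages, i.e. 64KB) to displace the L1D contents.
 */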
284
285 static int vmentry_l1d_flush_parse(const char *s)
286 {
287         unsigned int i;
288
289         if (s) {
290                 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
291                         if (vmentry_l1d_param[i].for_parse &&
292                             sysfs_streq(s, vmentry_l1d_param[i].option))
293                                 return i;
294                 }
295         }
296         return -EINVAL;
297 }
298
299 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
300 {
301         int l1tf, ret;
302
303         l1tf = vmentry_l1d_flush_parse(s);
304         if (l1tf < 0)
305                 return l1tf;
306
307         if (!boot_cpu_has(X86_BUG_L1TF))
308                 return 0;
309
310         /*
311          * Has vmx_init() run already? If not, this is the pre-init
312          * parameter parsing. In that case just store the value and let
313          * vmx_init() do the proper setup after enable_ept has been
314          * established.
315          */
316         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
317                 vmentry_l1d_flush_param = l1tf;
318                 return 0;
319         }
320
321         mutex_lock(&vmx_l1d_flush_mutex);
322         ret = vmx_setup_l1d_flush(l1tf);
323         mutex_unlock(&vmx_l1d_flush_mutex);
324         return ret;
325 }
326
327 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
328 {
329         if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
330                 return sprintf(s, "???\n");
331
332         return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
333 }
334
335 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
336         .set = vmentry_l1d_flush_set,
337         .get = vmentry_l1d_flush_get,
338 };
339 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
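
/*
 * Usage note (editor's addition): this exposes the L1D flush policy as a
 * writable module parameter, e.g. "kvm-intel.vmentry_l1d_flush=never" on the
 * kernel command line, or at runtime:
 *
 *	echo always > /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 *
 * Writes go through vmentry_l1d_flush_set() above, reads through
 * vmentry_l1d_flush_get().
 */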
340
341 enum ept_pointers_status {
342         EPT_POINTERS_CHECK = 0,
343         EPT_POINTERS_MATCH = 1,
344         EPT_POINTERS_MISMATCH = 2
345 };
346
347 struct kvm_vmx {
348         struct kvm kvm;
349
350         unsigned int tss_addr;
351         bool ept_identity_pagetable_done;
352         gpa_t ept_identity_map_addr;
353
354         enum ept_pointers_status ept_pointers_match;
355         spinlock_t ept_pointer_lock;
356 };
357
358 #define NR_AUTOLOAD_MSRS 8
359
360 struct vmcs_hdr {
361         u32 revision_id:31;
362         u32 shadow_vmcs:1;
363 };
364
365 struct vmcs {
366         struct vmcs_hdr hdr;
367         u32 abort;
368         char data[0];
369 };
370
371 /*
372  * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
373  * and whose values change infrequently, but are not constant.  I.e. this is
374  * used as a write-through cache of the corresponding VMCS fields.
375  */
376 struct vmcs_host_state {
377         unsigned long cr3;      /* May not match real cr3 */
378         unsigned long cr4;      /* May not match real cr4 */
379         unsigned long gs_base;
380         unsigned long fs_base;
381
382         u16           fs_sel, gs_sel, ldt_sel;
383 #ifdef CONFIG_X86_64
384         u16           ds_sel, es_sel;
385 #endif
386 };
387
388 /*
389  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
390  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
391  * loaded on this CPU (so we can clear them if the CPU goes down).
392  */
393 struct loaded_vmcs {
394         struct vmcs *vmcs;
395         struct vmcs *shadow_vmcs;
396         int cpu;
397         bool launched;
398         bool nmi_known_unmasked;
399         bool hv_timer_armed;
400         /* Support for vnmi-less CPUs */
401         int soft_vnmi_blocked;
402         ktime_t entry_time;
403         s64 vnmi_blocked_time;
404         unsigned long *msr_bitmap;
405         struct list_head loaded_vmcss_on_cpu_link;
406         struct vmcs_host_state host_state;
407 };
408
409 struct shared_msr_entry {
410         unsigned index;
411         u64 data;
412         u64 mask;
413 };
414
415 /*
416  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
417  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
418  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
419  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
420  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
421  * More than one of these structures may exist, if L1 runs multiple L2 guests.
422  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
423  * underlying hardware which will be used to run L2.
424  * This structure is packed to ensure that its layout is identical across
425  * machines (necessary for live migration).
426  *
427  * IMPORTANT: Changing the layout of existing fields in this structure
428  * will break save/restore compatibility with older kvm releases. When
429  * adding new fields, either use space in the reserved padding* arrays
430  * or add the new fields to the end of the structure.
431  */
432 typedef u64 natural_width;
433 struct __packed vmcs12 {
434         /* According to the Intel spec, a VMCS region must start with the
435          * following two fields. Then follow implementation-specific data.
436          */
437         struct vmcs_hdr hdr;
438         u32 abort;
439
440         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
441         u32 padding[7]; /* room for future expansion */
442
443         u64 io_bitmap_a;
444         u64 io_bitmap_b;
445         u64 msr_bitmap;
446         u64 vm_exit_msr_store_addr;
447         u64 vm_exit_msr_load_addr;
448         u64 vm_entry_msr_load_addr;
449         u64 tsc_offset;
450         u64 virtual_apic_page_addr;
451         u64 apic_access_addr;
452         u64 posted_intr_desc_addr;
453         u64 ept_pointer;
454         u64 eoi_exit_bitmap0;
455         u64 eoi_exit_bitmap1;
456         u64 eoi_exit_bitmap2;
457         u64 eoi_exit_bitmap3;
458         u64 xss_exit_bitmap;
459         u64 guest_physical_address;
460         u64 vmcs_link_pointer;
461         u64 guest_ia32_debugctl;
462         u64 guest_ia32_pat;
463         u64 guest_ia32_efer;
464         u64 guest_ia32_perf_global_ctrl;
465         u64 guest_pdptr0;
466         u64 guest_pdptr1;
467         u64 guest_pdptr2;
468         u64 guest_pdptr3;
469         u64 guest_bndcfgs;
470         u64 host_ia32_pat;
471         u64 host_ia32_efer;
472         u64 host_ia32_perf_global_ctrl;
473         u64 vmread_bitmap;
474         u64 vmwrite_bitmap;
475         u64 vm_function_control;
476         u64 eptp_list_address;
477         u64 pml_address;
478         u64 padding64[3]; /* room for future expansion */
479         /*
480          * To allow migration of L1 (complete with its L2 guests) between
481          * machines of different natural widths (32 or 64 bit), we cannot have
482          * unsigned long fields with no explicit size. We use u64 (aliased
483          * natural_width) instead. Luckily, x86 is little-endian.
484          */
485         natural_width cr0_guest_host_mask;
486         natural_width cr4_guest_host_mask;
487         natural_width cr0_read_shadow;
488         natural_width cr4_read_shadow;
489         natural_width cr3_target_value0;
490         natural_width cr3_target_value1;
491         natural_width cr3_target_value2;
492         natural_width cr3_target_value3;
493         natural_width exit_qualification;
494         natural_width guest_linear_address;
495         natural_width guest_cr0;
496         natural_width guest_cr3;
497         natural_width guest_cr4;
498         natural_width guest_es_base;
499         natural_width guest_cs_base;
500         natural_width guest_ss_base;
501         natural_width guest_ds_base;
502         natural_width guest_fs_base;
503         natural_width guest_gs_base;
504         natural_width guest_ldtr_base;
505         natural_width guest_tr_base;
506         natural_width guest_gdtr_base;
507         natural_width guest_idtr_base;
508         natural_width guest_dr7;
509         natural_width guest_rsp;
510         natural_width guest_rip;
511         natural_width guest_rflags;
512         natural_width guest_pending_dbg_exceptions;
513         natural_width guest_sysenter_esp;
514         natural_width guest_sysenter_eip;
515         natural_width host_cr0;
516         natural_width host_cr3;
517         natural_width host_cr4;
518         natural_width host_fs_base;
519         natural_width host_gs_base;
520         natural_width host_tr_base;
521         natural_width host_gdtr_base;
522         natural_width host_idtr_base;
523         natural_width host_ia32_sysenter_esp;
524         natural_width host_ia32_sysenter_eip;
525         natural_width host_rsp;
526         natural_width host_rip;
527         natural_width paddingl[8]; /* room for future expansion */
528         u32 pin_based_vm_exec_control;
529         u32 cpu_based_vm_exec_control;
530         u32 exception_bitmap;
531         u32 page_fault_error_code_mask;
532         u32 page_fault_error_code_match;
533         u32 cr3_target_count;
534         u32 vm_exit_controls;
535         u32 vm_exit_msr_store_count;
536         u32 vm_exit_msr_load_count;
537         u32 vm_entry_controls;
538         u32 vm_entry_msr_load_count;
539         u32 vm_entry_intr_info_field;
540         u32 vm_entry_exception_error_code;
541         u32 vm_entry_instruction_len;
542         u32 tpr_threshold;
543         u32 secondary_vm_exec_control;
544         u32 vm_instruction_error;
545         u32 vm_exit_reason;
546         u32 vm_exit_intr_info;
547         u32 vm_exit_intr_error_code;
548         u32 idt_vectoring_info_field;
549         u32 idt_vectoring_error_code;
550         u32 vm_exit_instruction_len;
551         u32 vmx_instruction_info;
552         u32 guest_es_limit;
553         u32 guest_cs_limit;
554         u32 guest_ss_limit;
555         u32 guest_ds_limit;
556         u32 guest_fs_limit;
557         u32 guest_gs_limit;
558         u32 guest_ldtr_limit;
559         u32 guest_tr_limit;
560         u32 guest_gdtr_limit;
561         u32 guest_idtr_limit;
562         u32 guest_es_ar_bytes;
563         u32 guest_cs_ar_bytes;
564         u32 guest_ss_ar_bytes;
565         u32 guest_ds_ar_bytes;
566         u32 guest_fs_ar_bytes;
567         u32 guest_gs_ar_bytes;
568         u32 guest_ldtr_ar_bytes;
569         u32 guest_tr_ar_bytes;
570         u32 guest_interruptibility_info;
571         u32 guest_activity_state;
572         u32 guest_sysenter_cs;
573         u32 host_ia32_sysenter_cs;
574         u32 vmx_preemption_timer_value;
575         u32 padding32[7]; /* room for future expansion */
576         u16 virtual_processor_id;
577         u16 posted_intr_nv;
578         u16 guest_es_selector;
579         u16 guest_cs_selector;
580         u16 guest_ss_selector;
581         u16 guest_ds_selector;
582         u16 guest_fs_selector;
583         u16 guest_gs_selector;
584         u16 guest_ldtr_selector;
585         u16 guest_tr_selector;
586         u16 guest_intr_status;
587         u16 host_es_selector;
588         u16 host_cs_selector;
589         u16 host_ss_selector;
590         u16 host_ds_selector;
591         u16 host_fs_selector;
592         u16 host_gs_selector;
593         u16 host_tr_selector;
594         u16 guest_pml_index;
595 };
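
/*
 * Editor's illustration of the layout rule above: a hypothetical new u64
 * field must either take one of the reserved padding* slots (e.g. padding64[])
 * or be appended after guest_pml_index; inserting it between existing members
 * would shift every later offset and break save/restore compatibility, which
 * the CHECK_OFFSET() assertions below catch at build time.
 */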
596
597 /*
598  * For save/restore compatibility, the vmcs12 field offsets must not change.
599  */
600 #define CHECK_OFFSET(field, loc)                                \
601         BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc),       \
602                 "Offset of " #field " in struct vmcs12 has changed.")
603
604 static inline void vmx_check_vmcs12_offsets(void) {
605         CHECK_OFFSET(hdr, 0);
606         CHECK_OFFSET(abort, 4);
607         CHECK_OFFSET(launch_state, 8);
608         CHECK_OFFSET(io_bitmap_a, 40);
609         CHECK_OFFSET(io_bitmap_b, 48);
610         CHECK_OFFSET(msr_bitmap, 56);
611         CHECK_OFFSET(vm_exit_msr_store_addr, 64);
612         CHECK_OFFSET(vm_exit_msr_load_addr, 72);
613         CHECK_OFFSET(vm_entry_msr_load_addr, 80);
614         CHECK_OFFSET(tsc_offset, 88);
615         CHECK_OFFSET(virtual_apic_page_addr, 96);
616         CHECK_OFFSET(apic_access_addr, 104);
617         CHECK_OFFSET(posted_intr_desc_addr, 112);
618         CHECK_OFFSET(ept_pointer, 120);
619         CHECK_OFFSET(eoi_exit_bitmap0, 128);
620         CHECK_OFFSET(eoi_exit_bitmap1, 136);
621         CHECK_OFFSET(eoi_exit_bitmap2, 144);
622         CHECK_OFFSET(eoi_exit_bitmap3, 152);
623         CHECK_OFFSET(xss_exit_bitmap, 160);
624         CHECK_OFFSET(guest_physical_address, 168);
625         CHECK_OFFSET(vmcs_link_pointer, 176);
626         CHECK_OFFSET(guest_ia32_debugctl, 184);
627         CHECK_OFFSET(guest_ia32_pat, 192);
628         CHECK_OFFSET(guest_ia32_efer, 200);
629         CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
630         CHECK_OFFSET(guest_pdptr0, 216);
631         CHECK_OFFSET(guest_pdptr1, 224);
632         CHECK_OFFSET(guest_pdptr2, 232);
633         CHECK_OFFSET(guest_pdptr3, 240);
634         CHECK_OFFSET(guest_bndcfgs, 248);
635         CHECK_OFFSET(host_ia32_pat, 256);
636         CHECK_OFFSET(host_ia32_efer, 264);
637         CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
638         CHECK_OFFSET(vmread_bitmap, 280);
639         CHECK_OFFSET(vmwrite_bitmap, 288);
640         CHECK_OFFSET(vm_function_control, 296);
641         CHECK_OFFSET(eptp_list_address, 304);
642         CHECK_OFFSET(pml_address, 312);
643         CHECK_OFFSET(cr0_guest_host_mask, 344);
644         CHECK_OFFSET(cr4_guest_host_mask, 352);
645         CHECK_OFFSET(cr0_read_shadow, 360);
646         CHECK_OFFSET(cr4_read_shadow, 368);
647         CHECK_OFFSET(cr3_target_value0, 376);
648         CHECK_OFFSET(cr3_target_value1, 384);
649         CHECK_OFFSET(cr3_target_value2, 392);
650         CHECK_OFFSET(cr3_target_value3, 400);
651         CHECK_OFFSET(exit_qualification, 408);
652         CHECK_OFFSET(guest_linear_address, 416);
653         CHECK_OFFSET(guest_cr0, 424);
654         CHECK_OFFSET(guest_cr3, 432);
655         CHECK_OFFSET(guest_cr4, 440);
656         CHECK_OFFSET(guest_es_base, 448);
657         CHECK_OFFSET(guest_cs_base, 456);
658         CHECK_OFFSET(guest_ss_base, 464);
659         CHECK_OFFSET(guest_ds_base, 472);
660         CHECK_OFFSET(guest_fs_base, 480);
661         CHECK_OFFSET(guest_gs_base, 488);
662         CHECK_OFFSET(guest_ldtr_base, 496);
663         CHECK_OFFSET(guest_tr_base, 504);
664         CHECK_OFFSET(guest_gdtr_base, 512);
665         CHECK_OFFSET(guest_idtr_base, 520);
666         CHECK_OFFSET(guest_dr7, 528);
667         CHECK_OFFSET(guest_rsp, 536);
668         CHECK_OFFSET(guest_rip, 544);
669         CHECK_OFFSET(guest_rflags, 552);
670         CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
671         CHECK_OFFSET(guest_sysenter_esp, 568);
672         CHECK_OFFSET(guest_sysenter_eip, 576);
673         CHECK_OFFSET(host_cr0, 584);
674         CHECK_OFFSET(host_cr3, 592);
675         CHECK_OFFSET(host_cr4, 600);
676         CHECK_OFFSET(host_fs_base, 608);
677         CHECK_OFFSET(host_gs_base, 616);
678         CHECK_OFFSET(host_tr_base, 624);
679         CHECK_OFFSET(host_gdtr_base, 632);
680         CHECK_OFFSET(host_idtr_base, 640);
681         CHECK_OFFSET(host_ia32_sysenter_esp, 648);
682         CHECK_OFFSET(host_ia32_sysenter_eip, 656);
683         CHECK_OFFSET(host_rsp, 664);
684         CHECK_OFFSET(host_rip, 672);
685         CHECK_OFFSET(pin_based_vm_exec_control, 744);
686         CHECK_OFFSET(cpu_based_vm_exec_control, 748);
687         CHECK_OFFSET(exception_bitmap, 752);
688         CHECK_OFFSET(page_fault_error_code_mask, 756);
689         CHECK_OFFSET(page_fault_error_code_match, 760);
690         CHECK_OFFSET(cr3_target_count, 764);
691         CHECK_OFFSET(vm_exit_controls, 768);
692         CHECK_OFFSET(vm_exit_msr_store_count, 772);
693         CHECK_OFFSET(vm_exit_msr_load_count, 776);
694         CHECK_OFFSET(vm_entry_controls, 780);
695         CHECK_OFFSET(vm_entry_msr_load_count, 784);
696         CHECK_OFFSET(vm_entry_intr_info_field, 788);
697         CHECK_OFFSET(vm_entry_exception_error_code, 792);
698         CHECK_OFFSET(vm_entry_instruction_len, 796);
699         CHECK_OFFSET(tpr_threshold, 800);
700         CHECK_OFFSET(secondary_vm_exec_control, 804);
701         CHECK_OFFSET(vm_instruction_error, 808);
702         CHECK_OFFSET(vm_exit_reason, 812);
703         CHECK_OFFSET(vm_exit_intr_info, 816);
704         CHECK_OFFSET(vm_exit_intr_error_code, 820);
705         CHECK_OFFSET(idt_vectoring_info_field, 824);
706         CHECK_OFFSET(idt_vectoring_error_code, 828);
707         CHECK_OFFSET(vm_exit_instruction_len, 832);
708         CHECK_OFFSET(vmx_instruction_info, 836);
709         CHECK_OFFSET(guest_es_limit, 840);
710         CHECK_OFFSET(guest_cs_limit, 844);
711         CHECK_OFFSET(guest_ss_limit, 848);
712         CHECK_OFFSET(guest_ds_limit, 852);
713         CHECK_OFFSET(guest_fs_limit, 856);
714         CHECK_OFFSET(guest_gs_limit, 860);
715         CHECK_OFFSET(guest_ldtr_limit, 864);
716         CHECK_OFFSET(guest_tr_limit, 868);
717         CHECK_OFFSET(guest_gdtr_limit, 872);
718         CHECK_OFFSET(guest_idtr_limit, 876);
719         CHECK_OFFSET(guest_es_ar_bytes, 880);
720         CHECK_OFFSET(guest_cs_ar_bytes, 884);
721         CHECK_OFFSET(guest_ss_ar_bytes, 888);
722         CHECK_OFFSET(guest_ds_ar_bytes, 892);
723         CHECK_OFFSET(guest_fs_ar_bytes, 896);
724         CHECK_OFFSET(guest_gs_ar_bytes, 900);
725         CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
726         CHECK_OFFSET(guest_tr_ar_bytes, 908);
727         CHECK_OFFSET(guest_interruptibility_info, 912);
728         CHECK_OFFSET(guest_activity_state, 916);
729         CHECK_OFFSET(guest_sysenter_cs, 920);
730         CHECK_OFFSET(host_ia32_sysenter_cs, 924);
731         CHECK_OFFSET(vmx_preemption_timer_value, 928);
732         CHECK_OFFSET(virtual_processor_id, 960);
733         CHECK_OFFSET(posted_intr_nv, 962);
734         CHECK_OFFSET(guest_es_selector, 964);
735         CHECK_OFFSET(guest_cs_selector, 966);
736         CHECK_OFFSET(guest_ss_selector, 968);
737         CHECK_OFFSET(guest_ds_selector, 970);
738         CHECK_OFFSET(guest_fs_selector, 972);
739         CHECK_OFFSET(guest_gs_selector, 974);
740         CHECK_OFFSET(guest_ldtr_selector, 976);
741         CHECK_OFFSET(guest_tr_selector, 978);
742         CHECK_OFFSET(guest_intr_status, 980);
743         CHECK_OFFSET(host_es_selector, 982);
744         CHECK_OFFSET(host_cs_selector, 984);
745         CHECK_OFFSET(host_ss_selector, 986);
746         CHECK_OFFSET(host_ds_selector, 988);
747         CHECK_OFFSET(host_fs_selector, 990);
748         CHECK_OFFSET(host_gs_selector, 992);
749         CHECK_OFFSET(host_tr_selector, 994);
750         CHECK_OFFSET(guest_pml_index, 996);
751 }
752
753 /*
754  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
755  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
756  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
757  *
758  * IMPORTANT: Changing this value will break save/restore compatibility with
759  * older kvm releases.
760  */
761 #define VMCS12_REVISION 0x11e57ed0
762
763 /*
764  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
765  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
766  * the current implementation, 4K is reserved to avoid future complications.
767  */
768 #define VMCS12_SIZE 0x1000
769
770 /*
771  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
772  * supported VMCS12 field encoding.
773  */
774 #define VMCS12_MAX_FIELD_INDEX 0x17
775
776 struct nested_vmx_msrs {
777         /*
778          * We only store the "true" versions of the VMX capability MSRs. We
779          * generate the "non-true" versions by setting the must-be-1 bits
780          * according to the SDM.
781          */
782         u32 procbased_ctls_low;
783         u32 procbased_ctls_high;
784         u32 secondary_ctls_low;
785         u32 secondary_ctls_high;
786         u32 pinbased_ctls_low;
787         u32 pinbased_ctls_high;
788         u32 exit_ctls_low;
789         u32 exit_ctls_high;
790         u32 entry_ctls_low;
791         u32 entry_ctls_high;
792         u32 misc_low;
793         u32 misc_high;
794         u32 ept_caps;
795         u32 vpid_caps;
796         u64 basic;
797         u64 cr0_fixed0;
798         u64 cr0_fixed1;
799         u64 cr4_fixed0;
800         u64 cr4_fixed1;
801         u64 vmcs_enum;
802         u64 vmfunc_controls;
803 };
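
/*
 * Editor's note (hedged example, based on the SDM rather than this excerpt):
 * a "non-true" capability MSR differs from its "true" counterpart only in the
 * allowed-0 settings of the default1 class of controls, e.g. the non-true
 * pin-based MSR reports those bits as must-be-1 while the true MSR may permit
 * clearing some of them.  KVM stores only the true values here and
 * reconstructs the non-true values on read, as the comment above says.
 */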
804
805 /*
806  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
807  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
808  */
809 struct nested_vmx {
810         /* Has the level1 guest done vmxon? */
811         bool vmxon;
812         gpa_t vmxon_ptr;
813         bool pml_full;
814
815         /* The guest-physical address of the current VMCS L1 keeps for L2 */
816         gpa_t current_vmptr;
817         /*
818          * Cache of the guest's VMCS, existing outside of guest memory.
819          * Loaded from guest memory during VMPTRLD. Flushed to guest
820          * memory during VMCLEAR and VMPTRLD.
821          */
822         struct vmcs12 *cached_vmcs12;
823         /*
824          * Cache of the guest's shadow VMCS, existing outside of guest
825          * memory. Loaded from guest memory during VM entry. Flushed
826          * to guest memory during VM exit.
827          */
828         struct vmcs12 *cached_shadow_vmcs12;
829         /*
830          * Indicates if the shadow vmcs must be updated with the
831          * data held by vmcs12.
832          */
833         bool sync_shadow_vmcs;
834         bool dirty_vmcs12;
835
836         /*
837          * vmcs02 has been initialized, i.e. state that is constant for
838          * vmcs02 has been written to the backing VMCS.  Initialization
839          * is delayed until L1 actually attempts to run a nested VM.
840          */
841         bool vmcs02_initialized;
842
843         bool change_vmcs01_virtual_apic_mode;
844
845         /* L2 must run next, and mustn't decide to exit to L1. */
846         bool nested_run_pending;
847
848         struct loaded_vmcs vmcs02;
849
850         /*
851          * Guest pages referred to in the vmcs02 with host-physical
852          * pointers, so we must keep them pinned while L2 runs.
853          */
854         struct page *apic_access_page;
855         struct page *virtual_apic_page;
856         struct page *pi_desc_page;
857         struct pi_desc *pi_desc;
858         bool pi_pending;
859         u16 posted_intr_nv;
860
861         struct hrtimer preemption_timer;
862         bool preemption_timer_expired;
863
864         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
865         u64 vmcs01_debugctl;
866         u64 vmcs01_guest_bndcfgs;
867
868         u16 vpid02;
869         u16 last_vpid;
870
871         struct nested_vmx_msrs msrs;
872
873         /* SMM related state */
874         struct {
875                 /* in VMX operation on SMM entry? */
876                 bool vmxon;
877                 /* in guest mode on SMM entry? */
878                 bool guest_mode;
879         } smm;
880 };
881
882 #define POSTED_INTR_ON  0
883 #define POSTED_INTR_SN  1
884
885 /* Posted-Interrupt Descriptor */
886 struct pi_desc {
887         u32 pir[8];     /* Posted interrupt requested */
888         union {
889                 struct {
890                                 /* bit 256 - Outstanding Notification */
891                         u16     on      : 1,
892                                 /* bit 257 - Suppress Notification */
893                                 sn      : 1,
894                                 /* bit 271:258 - Reserved */
895                                 rsvd_1  : 14;
896                                 /* bit 279:272 - Notification Vector */
897                         u8      nv;
898                                 /* bit 287:280 - Reserved */
899                         u8      rsvd_2;
900                                 /* bit 319:288 - Notification Destination */
901                         u32     ndst;
902                 };
903                 u64 control;
904         };
905         u32 rsvd[6];
906 } __aligned(64);
907
908 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
909 {
910         return test_and_set_bit(POSTED_INTR_ON,
911                         (unsigned long *)&pi_desc->control);
912 }
913
914 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
915 {
916         return test_and_clear_bit(POSTED_INTR_ON,
917                         (unsigned long *)&pi_desc->control);
918 }
919
920 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
921 {
922         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
923 }
924
925 static inline void pi_clear_sn(struct pi_desc *pi_desc)
926 {
927         return clear_bit(POSTED_INTR_SN,
928                         (unsigned long *)&pi_desc->control);
929 }
930
931 static inline void pi_set_sn(struct pi_desc *pi_desc)
932 {
933         return set_bit(POSTED_INTR_SN,
934                         (unsigned long *)&pi_desc->control);
935 }
936
937 static inline void pi_clear_on(struct pi_desc *pi_desc)
938 {
939         clear_bit(POSTED_INTR_ON,
940                   (unsigned long *)&pi_desc->control);
941 }
942
943 static inline int pi_test_on(struct pi_desc *pi_desc)
944 {
945         return test_bit(POSTED_INTR_ON,
946                         (unsigned long *)&pi_desc->control);
947 }
948
949 static inline int pi_test_sn(struct pi_desc *pi_desc)
950 {
951         return test_bit(POSTED_INTR_SN,
952                         (unsigned long *)&pi_desc->control);
953 }
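
/*
 * Illustrative sketch only (editor's addition, hypothetical helper): a typical
 * "post an interrupt" sequence with the accessors above -- record the vector
 * in pir[], then set the Outstanding Notification bit.  The caller only needs
 * to send a notification event if ON was previously clear.
 */
static inline bool example_pi_post(struct pi_desc *pi_desc, int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return false;			/* vector already pending */

	return !pi_test_and_set_on(pi_desc);	/* true: notify the target */
}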
954
955 struct vmx_msrs {
956         unsigned int            nr;
957         struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
958 };
959
960 struct vcpu_vmx {
961         struct kvm_vcpu       vcpu;
962         unsigned long         host_rsp;
963         u8                    fail;
964         u8                    msr_bitmap_mode;
965         u32                   exit_intr_info;
966         u32                   idt_vectoring_info;
967         ulong                 rflags;
968         struct shared_msr_entry *guest_msrs;
969         int                   nmsrs;
970         int                   save_nmsrs;
971         unsigned long         host_idt_base;
972 #ifdef CONFIG_X86_64
973         u64                   msr_host_kernel_gs_base;
974         u64                   msr_guest_kernel_gs_base;
975 #endif
976
977         u64                   arch_capabilities;
978         u64                   spec_ctrl;
979
980         u32 vm_entry_controls_shadow;
981         u32 vm_exit_controls_shadow;
982         u32 secondary_exec_control;
983
984         /*
985          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
986          * non-nested (L1) guest, it always points to vmcs01. For a nested
987          * guest (L2), it points to a different VMCS.  loaded_cpu_state points
988          * to the VMCS whose state is loaded into the CPU registers that only
989          * need to be switched when transitioning to/from the kernel; a NULL
990          * value indicates that host state is loaded.
991          */
992         struct loaded_vmcs    vmcs01;
993         struct loaded_vmcs   *loaded_vmcs;
994         struct loaded_vmcs   *loaded_cpu_state;
995         bool                  __launched; /* temporary, used in vmx_vcpu_run */
996         struct msr_autoload {
997                 struct vmx_msrs guest;
998                 struct vmx_msrs host;
999         } msr_autoload;
1000
1001         struct {
1002                 int vm86_active;
1003                 ulong save_rflags;
1004                 struct kvm_segment segs[8];
1005         } rmode;
1006         struct {
1007                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
1008                 struct kvm_save_segment {
1009                         u16 selector;
1010                         unsigned long base;
1011                         u32 limit;
1012                         u32 ar;
1013                 } seg[8];
1014         } segment_cache;
1015         int vpid;
1016         bool emulation_required;
1017
1018         u32 exit_reason;
1019
1020         /* Posted interrupt descriptor */
1021         struct pi_desc pi_desc;
1022
1023         /* Support for a guest hypervisor (nested VMX) */
1024         struct nested_vmx nested;
1025
1026         /* Dynamic PLE window. */
1027         int ple_window;
1028         bool ple_window_dirty;
1029
1030         bool req_immediate_exit;
1031
1032         /* Support for PML */
1033 #define PML_ENTITY_NUM          512
1034         struct page *pml_pg;
1035
1036         /* apic deadline value in host tsc */
1037         u64 hv_deadline_tsc;
1038
1039         u64 current_tsc_ratio;
1040
1041         u32 host_pkru;
1042
1043         unsigned long host_debugctlmsr;
1044
1045         /*
1046          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
1047          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
1048          * in msr_ia32_feature_control_valid_bits.
1049          */
1050         u64 msr_ia32_feature_control;
1051         u64 msr_ia32_feature_control_valid_bits;
1052         u64 ept_pointer;
1053 };
1054
1055 enum segment_cache_field {
1056         SEG_FIELD_SEL = 0,
1057         SEG_FIELD_BASE = 1,
1058         SEG_FIELD_LIMIT = 2,
1059         SEG_FIELD_AR = 3,
1060
1061         SEG_FIELD_NR = 4
1062 };
1063
1064 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
1065 {
1066         return container_of(kvm, struct kvm_vmx, kvm);
1067 }
1068
1069 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
1070 {
1071         return container_of(vcpu, struct vcpu_vmx, vcpu);
1072 }
1073
1074 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
1075 {
1076         return &(to_vmx(vcpu)->pi_desc);
1077 }
1078
1079 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
1080 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
1081 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
1082 #define FIELD64(number, name)                                           \
1083         FIELD(number, name),                                            \
1084         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
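
/*
 * Worked example (editor's addition): the hardware encoding of
 * GUEST_ES_SELECTOR is 0x0800, and ROL16(0x0800, 6) == 0x0002, so
 * FIELD(GUEST_ES_SELECTOR, guest_es_selector) expands to
 * [0x0002] = offsetof(struct vmcs12, guest_es_selector).  The rotation packs
 * the sparse field encodings into a denser table index.
 */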
1085
1086
1087 static u16 shadow_read_only_fields[] = {
1088 #define SHADOW_FIELD_RO(x) x,
1089 #include "vmx_shadow_fields.h"
1090 };
1091 static int max_shadow_read_only_fields =
1092         ARRAY_SIZE(shadow_read_only_fields);
1093
1094 static u16 shadow_read_write_fields[] = {
1095 #define SHADOW_FIELD_RW(x) x,
1096 #include "vmx_shadow_fields.h"
1097 };
1098 static int max_shadow_read_write_fields =
1099         ARRAY_SIZE(shadow_read_write_fields);
1100
1101 static const unsigned short vmcs_field_to_offset_table[] = {
1102         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
1103         FIELD(POSTED_INTR_NV, posted_intr_nv),
1104         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
1105         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
1106         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
1107         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
1108         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
1109         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
1110         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
1111         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
1112         FIELD(GUEST_INTR_STATUS, guest_intr_status),
1113         FIELD(GUEST_PML_INDEX, guest_pml_index),
1114         FIELD(HOST_ES_SELECTOR, host_es_selector),
1115         FIELD(HOST_CS_SELECTOR, host_cs_selector),
1116         FIELD(HOST_SS_SELECTOR, host_ss_selector),
1117         FIELD(HOST_DS_SELECTOR, host_ds_selector),
1118         FIELD(HOST_FS_SELECTOR, host_fs_selector),
1119         FIELD(HOST_GS_SELECTOR, host_gs_selector),
1120         FIELD(HOST_TR_SELECTOR, host_tr_selector),
1121         FIELD64(IO_BITMAP_A, io_bitmap_a),
1122         FIELD64(IO_BITMAP_B, io_bitmap_b),
1123         FIELD64(MSR_BITMAP, msr_bitmap),
1124         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
1125         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
1126         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
1127         FIELD64(PML_ADDRESS, pml_address),
1128         FIELD64(TSC_OFFSET, tsc_offset),
1129         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
1130         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
1131         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
1132         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
1133         FIELD64(EPT_POINTER, ept_pointer),
1134         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
1135         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
1136         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
1137         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
1138         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
1139         FIELD64(VMREAD_BITMAP, vmread_bitmap),
1140         FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
1141         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
1142         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
1143         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
1144         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
1145         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
1146         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
1147         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
1148         FIELD64(GUEST_PDPTR0, guest_pdptr0),
1149         FIELD64(GUEST_PDPTR1, guest_pdptr1),
1150         FIELD64(GUEST_PDPTR2, guest_pdptr2),
1151         FIELD64(GUEST_PDPTR3, guest_pdptr3),
1152         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
1153         FIELD64(HOST_IA32_PAT, host_ia32_pat),
1154         FIELD64(HOST_IA32_EFER, host_ia32_efer),
1155         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
1156         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
1157         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
1158         FIELD(EXCEPTION_BITMAP, exception_bitmap),
1159         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
1160         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
1161         FIELD(CR3_TARGET_COUNT, cr3_target_count),
1162         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
1163         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
1164         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
1165         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
1166         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
1167         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
1168         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
1169         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
1170         FIELD(TPR_THRESHOLD, tpr_threshold),
1171         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
1172         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
1173         FIELD(VM_EXIT_REASON, vm_exit_reason),
1174         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
1175         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
1176         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
1177         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
1178         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
1179         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
1180         FIELD(GUEST_ES_LIMIT, guest_es_limit),
1181         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
1182         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
1183         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
1184         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
1185         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
1186         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
1187         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
1188         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
1189         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
1190         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
1191         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
1192         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
1193         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
1194         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
1195         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
1196         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
1197         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
1198         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
1199         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
1200         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
1201         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
1202         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
1203         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
1204         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
1205         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
1206         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
1207         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
1208         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
1209         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
1210         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
1211         FIELD(EXIT_QUALIFICATION, exit_qualification),
1212         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
1213         FIELD(GUEST_CR0, guest_cr0),
1214         FIELD(GUEST_CR3, guest_cr3),
1215         FIELD(GUEST_CR4, guest_cr4),
1216         FIELD(GUEST_ES_BASE, guest_es_base),
1217         FIELD(GUEST_CS_BASE, guest_cs_base),
1218         FIELD(GUEST_SS_BASE, guest_ss_base),
1219         FIELD(GUEST_DS_BASE, guest_ds_base),
1220         FIELD(GUEST_FS_BASE, guest_fs_base),
1221         FIELD(GUEST_GS_BASE, guest_gs_base),
1222         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
1223         FIELD(GUEST_TR_BASE, guest_tr_base),
1224         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
1225         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
1226         FIELD(GUEST_DR7, guest_dr7),
1227         FIELD(GUEST_RSP, guest_rsp),
1228         FIELD(GUEST_RIP, guest_rip),
1229         FIELD(GUEST_RFLAGS, guest_rflags),
1230         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
1231         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
1232         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
1233         FIELD(HOST_CR0, host_cr0),
1234         FIELD(HOST_CR3, host_cr3),
1235         FIELD(HOST_CR4, host_cr4),
1236         FIELD(HOST_FS_BASE, host_fs_base),
1237         FIELD(HOST_GS_BASE, host_gs_base),
1238         FIELD(HOST_TR_BASE, host_tr_base),
1239         FIELD(HOST_GDTR_BASE, host_gdtr_base),
1240         FIELD(HOST_IDTR_BASE, host_idtr_base),
1241         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
1242         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
1243         FIELD(HOST_RSP, host_rsp),
1244         FIELD(HOST_RIP, host_rip),
1245 };
1246
1247 static inline short vmcs_field_to_offset(unsigned long field)
1248 {
1249         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
1250         unsigned short offset;
1251         unsigned index;
1252
1253         if (field >> 15)
1254                 return -ENOENT;
1255
1256         index = ROL16(field, 6);
1257         if (index >= size)
1258                 return -ENOENT;
1259
1260         index = array_index_nospec(index, size);
1261         offset = vmcs_field_to_offset_table[index];
1262         if (offset == 0)
1263                 return -ENOENT;
1264         return offset;
1265 }
1266
1267 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
1268 {
1269         return to_vmx(vcpu)->nested.cached_vmcs12;
1270 }
1271
1272 static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
1273 {
1274         return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
1275 }
1276
1277 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
1278 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
1279 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
1280 static bool vmx_xsaves_supported(void);
1281 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1282                             struct kvm_segment *var, int seg);
1283 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1284                             struct kvm_segment *var, int seg);
1285 static bool guest_state_valid(struct kvm_vcpu *vcpu);
1286 static u32 vmx_segment_access_rights(struct kvm_segment *var);
1287 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1288 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
1289 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
1290 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
1291                                             u16 error_code);
1292 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
1293 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
1294                                                           u32 msr, int type);
1295
1296 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
1297 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
1298 /*
1299  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
1300  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
1301  */
1302 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
1303
1304 /*
1305  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
1306  * can find which vCPU should be woken up.
1307  */
1308 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
1309 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
1310
1311 enum {
1312         VMX_VMREAD_BITMAP,
1313         VMX_VMWRITE_BITMAP,
1314         VMX_BITMAP_NR
1315 };
1316
1317 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
1318
1319 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
1320 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
1321
1322 static bool cpu_has_load_ia32_efer;
1323 static bool cpu_has_load_perf_global_ctrl;
1324
1325 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
1326 static DEFINE_SPINLOCK(vmx_vpid_lock);
1327
1328 static struct vmcs_config {
1329         int size;
1330         int order;
1331         u32 basic_cap;
1332         u32 revision_id;
1333         u32 pin_based_exec_ctrl;
1334         u32 cpu_based_exec_ctrl;
1335         u32 cpu_based_2nd_exec_ctrl;
1336         u32 vmexit_ctrl;
1337         u32 vmentry_ctrl;
1338         struct nested_vmx_msrs nested;
1339 } vmcs_config;
1340
1341 static struct vmx_capability {
1342         u32 ept;
1343         u32 vpid;
1344 } vmx_capability;
1345
1346 #define VMX_SEGMENT_FIELD(seg)                                  \
1347         [VCPU_SREG_##seg] = {                                   \
1348                 .selector = GUEST_##seg##_SELECTOR,             \
1349                 .base = GUEST_##seg##_BASE,                     \
1350                 .limit = GUEST_##seg##_LIMIT,                   \
1351                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
1352         }
1353
1354 static const struct kvm_vmx_segment_field {
1355         unsigned selector;
1356         unsigned base;
1357         unsigned limit;
1358         unsigned ar_bytes;
1359 } kvm_vmx_segment_fields[] = {
1360         VMX_SEGMENT_FIELD(CS),
1361         VMX_SEGMENT_FIELD(DS),
1362         VMX_SEGMENT_FIELD(ES),
1363         VMX_SEGMENT_FIELD(FS),
1364         VMX_SEGMENT_FIELD(GS),
1365         VMX_SEGMENT_FIELD(SS),
1366         VMX_SEGMENT_FIELD(TR),
1367         VMX_SEGMENT_FIELD(LDTR),
1368 };
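
/*
 * Editor's illustration: VMX_SEGMENT_FIELD(CS) above expands to
 *
 *	[VCPU_SREG_CS] = {
 *		.selector = GUEST_CS_SELECTOR,
 *		.base     = GUEST_CS_BASE,
 *		.limit    = GUEST_CS_LIMIT,
 *		.ar_bytes = GUEST_CS_AR_BYTES,
 *	},
 *
 * so kvm_vmx_segment_fields[VCPU_SREG_CS] maps a KVM segment register to the
 * four VMCS fields that describe it.
 */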
1369
1370 static u64 host_efer;
1371
1372 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1373
1374 /*
1375  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1376  * away by decrementing the array size.
1377  */
1378 static const u32 vmx_msr_index[] = {
1379 #ifdef CONFIG_X86_64
1380         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1381 #endif
1382         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1383 };
1384
1385 DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1386
1387 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1388
1389 #define KVM_EVMCS_VERSION 1
1390
1391 #if IS_ENABLED(CONFIG_HYPERV)
1392 static bool __read_mostly enlightened_vmcs = true;
1393 module_param(enlightened_vmcs, bool, 0444);
1394
1395 static inline void evmcs_write64(unsigned long field, u64 value)
1396 {
1397         u16 clean_field;
1398         int offset = get_evmcs_offset(field, &clean_field);
1399
1400         if (offset < 0)
1401                 return;
1402
1403         *(u64 *)((char *)current_evmcs + offset) = value;
1404
1405         current_evmcs->hv_clean_fields &= ~clean_field;
1406 }
1407
1408 static inline void evmcs_write32(unsigned long field, u32 value)
1409 {
1410         u16 clean_field;
1411         int offset = get_evmcs_offset(field, &clean_field);
1412
1413         if (offset < 0)
1414                 return;
1415
1416         *(u32 *)((char *)current_evmcs + offset) = value;
1417         current_evmcs->hv_clean_fields &= ~clean_field;
1418 }
1419
1420 static inline void evmcs_write16(unsigned long field, u16 value)
1421 {
1422         u16 clean_field;
1423         int offset = get_evmcs_offset(field, &clean_field);
1424
1425         if (offset < 0)
1426                 return;
1427
1428         *(u16 *)((char *)current_evmcs + offset) = value;
1429         current_evmcs->hv_clean_fields &= ~clean_field;
1430 }
1431
1432 static inline u64 evmcs_read64(unsigned long field)
1433 {
1434         int offset = get_evmcs_offset(field, NULL);
1435
1436         if (offset < 0)
1437                 return 0;
1438
1439         return *(u64 *)((char *)current_evmcs + offset);
1440 }
1441
1442 static inline u32 evmcs_read32(unsigned long field)
1443 {
1444         int offset = get_evmcs_offset(field, NULL);
1445
1446         if (offset < 0)
1447                 return 0;
1448
1449         return *(u32 *)((char *)current_evmcs + offset);
1450 }
1451
1452 static inline u16 evmcs_read16(unsigned long field)
1453 {
1454         int offset = get_evmcs_offset(field, NULL);
1455
1456         if (offset < 0)
1457                 return 0;
1458
1459         return *(u16 *)((char *)current_evmcs + offset);
1460 }
1461
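/*
 * Mark the MSR bitmap dirty in the enlightened VMCS so that Hyper-V
 * re-reads it on the next VM entry (only relevant when the MSR-bitmap
 * enlightenment is in use).
 */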
1462 static inline void evmcs_touch_msr_bitmap(void)
1463 {
1464         if (unlikely(!current_evmcs))
1465                 return;
1466
1467         if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1468                 current_evmcs->hv_clean_fields &=
1469                         ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1470 }
1471
1472 static void evmcs_load(u64 phys_addr)
1473 {
1474         struct hv_vp_assist_page *vp_ap =
1475                 hv_get_vp_assist_page(smp_processor_id());
1476
1477         vp_ap->current_nested_vmcs = phys_addr;
1478         vp_ap->enlighten_vmentry = 1;
1479 }
1480
1481 static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1482 {
1483         /*
1484          * Enlightened VMCSv1 doesn't support these:
1485          *
1486          *      POSTED_INTR_NV                  = 0x00000002,
1487          *      GUEST_INTR_STATUS               = 0x00000810,
1488          *      APIC_ACCESS_ADDR                = 0x00002014,
1489          *      POSTED_INTR_DESC_ADDR           = 0x00002016,
1490          *      EOI_EXIT_BITMAP0                = 0x0000201c,
1491          *      EOI_EXIT_BITMAP1                = 0x0000201e,
1492          *      EOI_EXIT_BITMAP2                = 0x00002020,
1493          *      EOI_EXIT_BITMAP3                = 0x00002022,
1494          */
1495         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1496         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1497                 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1498         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1499                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1500         vmcs_conf->cpu_based_2nd_exec_ctrl &=
1501                 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1502
1503         /*
1504          *      GUEST_PML_INDEX                 = 0x00000812,
1505          *      PML_ADDRESS                     = 0x0000200e,
1506          */
1507         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1508
1509         /*      VM_FUNCTION_CONTROL             = 0x00002018, */
1510         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1511
1512         /*
1513          *      EPTP_LIST_ADDRESS               = 0x00002024,
1514          *      VMREAD_BITMAP                   = 0x00002026,
1515          *      VMWRITE_BITMAP                  = 0x00002028,
1516          */
1517         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1518
1519         /*
1520          *      TSC_MULTIPLIER                  = 0x00002032,
1521          */
1522         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1523
1524         /*
1525          *      PLE_GAP                         = 0x00004020,
1526          *      PLE_WINDOW                      = 0x00004022,
1527          */
1528         vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1529
1530         /*
1531          *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
1532          */
1533         vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1534
1535         /*
1536          *      GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
1537          *      HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
1538          */
1539         vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1540         vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1541
1542         /*
1543          * Currently unsupported in KVM:
1544          *      GUEST_IA32_RTIT_CTL             = 0x00002814,
1545          */
1546 }
1547
1548 /* check_ept_pointer_match() must be called with ept_pointer_lock held. */
1549 static void check_ept_pointer_match(struct kvm *kvm)
1550 {
1551         struct kvm_vcpu *vcpu;
1552         u64 tmp_eptp = INVALID_PAGE;
1553         int i;
1554
1555         kvm_for_each_vcpu(i, vcpu, kvm) {
1556                 if (!VALID_PAGE(tmp_eptp)) {
1557                         tmp_eptp = to_vmx(vcpu)->ept_pointer;
1558                 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
1559                         to_kvm_vmx(kvm)->ept_pointers_match
1560                                 = EPT_POINTERS_MISMATCH;
1561                         return;
1562                 }
1563         }
1564
1565         to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
1566 }
1567
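/*
 * Remote TLB flush via Hyper-V's guest-address-space flush hypercall.
 * This only works when every vCPU shares the same EPT pointer; otherwise
 * return -ENOTSUPP so the caller can fall back to the regular flush path.
 */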
1568 static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
1569 {
1570         int ret;
1571
1572         spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1573
1574         if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
1575                 check_ept_pointer_match(kvm);
1576
1577         if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
1578                 ret = -ENOTSUPP;
1579                 goto out;
1580         }
1581
1582         ret = hyperv_flush_guest_mapping(
1583                         to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
1584
1585 out:
1586         spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
1587         return ret;
1588 }
1589 #else /* !IS_ENABLED(CONFIG_HYPERV) */
1590 static inline void evmcs_write64(unsigned long field, u64 value) {}
1591 static inline void evmcs_write32(unsigned long field, u32 value) {}
1592 static inline void evmcs_write16(unsigned long field, u16 value) {}
1593 static inline u64 evmcs_read64(unsigned long field) { return 0; }
1594 static inline u32 evmcs_read32(unsigned long field) { return 0; }
1595 static inline u16 evmcs_read16(unsigned long field) { return 0; }
1596 static inline void evmcs_load(u64 phys_addr) {}
1597 static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1598 static inline void evmcs_touch_msr_bitmap(void) {}
1599 #endif /* IS_ENABLED(CONFIG_HYPERV) */
1600
1601 static inline bool is_exception_n(u32 intr_info, u8 vector)
1602 {
1603         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1604                              INTR_INFO_VALID_MASK)) ==
1605                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1606 }
1607
1608 static inline bool is_debug(u32 intr_info)
1609 {
1610         return is_exception_n(intr_info, DB_VECTOR);
1611 }
1612
1613 static inline bool is_breakpoint(u32 intr_info)
1614 {
1615         return is_exception_n(intr_info, BP_VECTOR);
1616 }
1617
1618 static inline bool is_page_fault(u32 intr_info)
1619 {
1620         return is_exception_n(intr_info, PF_VECTOR);
1621 }
1622
1623 static inline bool is_invalid_opcode(u32 intr_info)
1624 {
1625         return is_exception_n(intr_info, UD_VECTOR);
1626 }
1627
1628 static inline bool is_gp_fault(u32 intr_info)
1629 {
1630         return is_exception_n(intr_info, GP_VECTOR);
1631 }
1632
1633 static inline bool is_machine_check(u32 intr_info)
1634 {
1635         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1636                              INTR_INFO_VALID_MASK)) ==
1637                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1638 }
1639
1640 /* Undocumented: icebp/int1 */
1641 static inline bool is_icebp(u32 intr_info)
1642 {
1643         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1644                 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1645 }
1646
1647 static inline bool cpu_has_vmx_msr_bitmap(void)
1648 {
1649         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1650 }
1651
1652 static inline bool cpu_has_vmx_tpr_shadow(void)
1653 {
1654         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1655 }
1656
1657 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1658 {
1659         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1660 }
1661
1662 static inline bool cpu_has_secondary_exec_ctrls(void)
1663 {
1664         return vmcs_config.cpu_based_exec_ctrl &
1665                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1666 }
1667
1668 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1669 {
1670         return vmcs_config.cpu_based_2nd_exec_ctrl &
1671                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1672 }
1673
1674 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1675 {
1676         return vmcs_config.cpu_based_2nd_exec_ctrl &
1677                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1678 }
1679
1680 static inline bool cpu_has_vmx_apic_register_virt(void)
1681 {
1682         return vmcs_config.cpu_based_2nd_exec_ctrl &
1683                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1684 }
1685
1686 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1687 {
1688         return vmcs_config.cpu_based_2nd_exec_ctrl &
1689                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1690 }
1691
1692 static inline bool cpu_has_vmx_encls_vmexit(void)
1693 {
1694         return vmcs_config.cpu_based_2nd_exec_ctrl &
1695                 SECONDARY_EXEC_ENCLS_EXITING;
1696 }
1697
1698 /*
1699  * Comment format: document - errata name - stepping - processor name.
1700  * Taken from
1701  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1702  */
1703 static u32 vmx_preemption_cpu_tfms[] = {
1704 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1705 0x000206E6,
1706 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1707 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1708 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1709 0x00020652,
1710 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1711 0x00020655,
1712 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1713 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1714 /*
1715  * 320767.pdf - AAP86  - B1 -
1716  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1717  */
1718 0x000106E5,
1719 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1720 0x000106A0,
1721 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1722 0x000106A1,
1723 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1724 0x000106A4,
1725 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1726 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1727 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1728 0x000106A5,
1729 };
1730
1731 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1732 {
1733         u32 eax = cpuid_eax(0x00000001), i;
1734
1735         /* Clear the reserved bits */
1736         eax &= ~(0x3U << 14 | 0xfU << 28);
1737         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1738                 if (eax == vmx_preemption_cpu_tfms[i])
1739                         return true;
1740
1741         return false;
1742 }
1743
1744 static inline bool cpu_has_vmx_preemption_timer(void)
1745 {
1746         return vmcs_config.pin_based_exec_ctrl &
1747                 PIN_BASED_VMX_PREEMPTION_TIMER;
1748 }
1749
1750 static inline bool cpu_has_vmx_posted_intr(void)
1751 {
1752         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1753                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1754 }
1755
1756 static inline bool cpu_has_vmx_apicv(void)
1757 {
1758         return cpu_has_vmx_apic_register_virt() &&
1759                 cpu_has_vmx_virtual_intr_delivery() &&
1760                 cpu_has_vmx_posted_intr();
1761 }
1762
1763 static inline bool cpu_has_vmx_flexpriority(void)
1764 {
1765         return cpu_has_vmx_tpr_shadow() &&
1766                 cpu_has_vmx_virtualize_apic_accesses();
1767 }
1768
1769 static inline bool cpu_has_vmx_ept_execute_only(void)
1770 {
1771         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1772 }
1773
1774 static inline bool cpu_has_vmx_ept_2m_page(void)
1775 {
1776         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1777 }
1778
1779 static inline bool cpu_has_vmx_ept_1g_page(void)
1780 {
1781         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1782 }
1783
1784 static inline bool cpu_has_vmx_ept_4levels(void)
1785 {
1786         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1787 }
1788
1789 static inline bool cpu_has_vmx_ept_mt_wb(void)
1790 {
1791         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1792 }
1793
1794 static inline bool cpu_has_vmx_ept_5levels(void)
1795 {
1796         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1797 }
1798
1799 static inline bool cpu_has_vmx_ept_ad_bits(void)
1800 {
1801         return vmx_capability.ept & VMX_EPT_AD_BIT;
1802 }
1803
1804 static inline bool cpu_has_vmx_invept_context(void)
1805 {
1806         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1807 }
1808
1809 static inline bool cpu_has_vmx_invept_global(void)
1810 {
1811         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1812 }
1813
1814 static inline bool cpu_has_vmx_invvpid_individual_addr(void)
1815 {
1816         return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
1817 }
1818
1819 static inline bool cpu_has_vmx_invvpid_single(void)
1820 {
1821         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1822 }
1823
1824 static inline bool cpu_has_vmx_invvpid_global(void)
1825 {
1826         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1827 }
1828
1829 static inline bool cpu_has_vmx_invvpid(void)
1830 {
1831         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1832 }
1833
1834 static inline bool cpu_has_vmx_ept(void)
1835 {
1836         return vmcs_config.cpu_based_2nd_exec_ctrl &
1837                 SECONDARY_EXEC_ENABLE_EPT;
1838 }
1839
1840 static inline bool cpu_has_vmx_unrestricted_guest(void)
1841 {
1842         return vmcs_config.cpu_based_2nd_exec_ctrl &
1843                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1844 }
1845
1846 static inline bool cpu_has_vmx_ple(void)
1847 {
1848         return vmcs_config.cpu_based_2nd_exec_ctrl &
1849                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1850 }
1851
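/*
 * vmcs_config.basic_cap caches the upper 32 bits of MSR_IA32_VMX_BASIC;
 * shifting it back up lets VMX_BASIC_INOUT (INS/OUTS information in the
 * VM-exit instruction-information field) be tested in place.
 */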
1852 static inline bool cpu_has_vmx_basic_inout(void)
1853 {
1854         return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1855 }
1856
1857 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1858 {
1859         return flexpriority_enabled && lapic_in_kernel(vcpu);
1860 }
1861
1862 static inline bool cpu_has_vmx_vpid(void)
1863 {
1864         return vmcs_config.cpu_based_2nd_exec_ctrl &
1865                 SECONDARY_EXEC_ENABLE_VPID;
1866 }
1867
1868 static inline bool cpu_has_vmx_rdtscp(void)
1869 {
1870         return vmcs_config.cpu_based_2nd_exec_ctrl &
1871                 SECONDARY_EXEC_RDTSCP;
1872 }
1873
1874 static inline bool cpu_has_vmx_invpcid(void)
1875 {
1876         return vmcs_config.cpu_based_2nd_exec_ctrl &
1877                 SECONDARY_EXEC_ENABLE_INVPCID;
1878 }
1879
1880 static inline bool cpu_has_virtual_nmis(void)
1881 {
1882         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1883 }
1884
1885 static inline bool cpu_has_vmx_wbinvd_exit(void)
1886 {
1887         return vmcs_config.cpu_based_2nd_exec_ctrl &
1888                 SECONDARY_EXEC_WBINVD_EXITING;
1889 }
1890
1891 static inline bool cpu_has_vmx_shadow_vmcs(void)
1892 {
1893         u64 vmx_msr;
1894         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1895         /* check if the cpu supports writing r/o exit information fields */
1896         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1897                 return false;
1898
1899         return vmcs_config.cpu_based_2nd_exec_ctrl &
1900                 SECONDARY_EXEC_SHADOW_VMCS;
1901 }
1902
1903 static inline bool cpu_has_vmx_pml(void)
1904 {
1905         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1906 }
1907
1908 static inline bool cpu_has_vmx_tsc_scaling(void)
1909 {
1910         return vmcs_config.cpu_based_2nd_exec_ctrl &
1911                 SECONDARY_EXEC_TSC_SCALING;
1912 }
1913
1914 static inline bool cpu_has_vmx_vmfunc(void)
1915 {
1916         return vmcs_config.cpu_based_2nd_exec_ctrl &
1917                 SECONDARY_EXEC_ENABLE_VMFUNC;
1918 }
1919
1920 static bool vmx_umip_emulated(void)
1921 {
1922         return vmcs_config.cpu_based_2nd_exec_ctrl &
1923                 SECONDARY_EXEC_DESC;
1924 }
1925
1926 static inline bool report_flexpriority(void)
1927 {
1928         return flexpriority_enabled;
1929 }
1930
1931 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1932 {
1933         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
1934 }
1935
1936 /*
1937  * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
1938  * to modify any valid field of the VMCS, or are the VM-exit
1939  * information fields read-only?
1940  */
1941 static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
1942 {
1943         return to_vmx(vcpu)->nested.msrs.misc_low &
1944                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
1945 }
1946
1947 static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
1948 {
1949         return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
1950 }
1951
1952 static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
1953 {
1954         return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
1955                         CPU_BASED_MONITOR_TRAP_FLAG;
1956 }
1957
1958 static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
1959 {
1960         return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
1961                 SECONDARY_EXEC_SHADOW_VMCS;
1962 }
1963
1964 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1965 {
1966         return vmcs12->cpu_based_vm_exec_control & bit;
1967 }
1968
1969 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1970 {
1971         return (vmcs12->cpu_based_vm_exec_control &
1972                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1973                 (vmcs12->secondary_vm_exec_control & bit);
1974 }
1975
1976 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1977 {
1978         return vmcs12->pin_based_vm_exec_control &
1979                 PIN_BASED_VMX_PREEMPTION_TIMER;
1980 }
1981
1982 static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1983 {
1984         return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1985 }
1986
1987 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1988 {
1989         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1990 }
1991
1992 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1993 {
1994         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1995 }
1996
1997 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1998 {
1999         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
2000 }
2001
2002 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
2003 {
2004         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
2005 }
2006
2007 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
2008 {
2009         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
2010 }
2011
2012 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
2013 {
2014         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
2015 }
2016
2017 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
2018 {
2019         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
2020 }
2021
2022 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
2023 {
2024         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2025 }
2026
2027 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
2028 {
2029         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
2030 }
2031
2032 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
2033 {
2034         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
2035 }
2036
2037 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
2038 {
2039         return nested_cpu_has_vmfunc(vmcs12) &&
2040                 (vmcs12->vm_function_control &
2041                  VMX_VMFUNC_EPTP_SWITCHING);
2042 }
2043
2044 static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
2045 {
2046         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
2047 }
2048
2049 static inline bool is_nmi(u32 intr_info)
2050 {
2051         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
2052                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
2053 }
2054
2055 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
2056                               u32 exit_intr_info,
2057                               unsigned long exit_qualification);
2058
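/*
 * Return the index in vmx->guest_msrs[] of the slot that shadows @msr,
 * or -1 if the MSR is not tracked there.
 */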
2059 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
2060 {
2061         int i;
2062
2063         for (i = 0; i < vmx->nmsrs; ++i)
2064                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
2065                         return i;
2066         return -1;
2067 }
2068
2069 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
2070 {
2071         struct {
2072                 u64 vpid : 16;
2073                 u64 rsvd : 48;
2074                 u64 gva;
2075         } operand = { vpid, 0, gva };
2076         bool error;
2077
2078         asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
2079                       : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
2080                       : "memory");
2081         BUG_ON(error);
2082 }
2083
2084 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
2085 {
2086         struct {
2087                 u64 eptp, gpa;
2088         } operand = {eptp, gpa};
2089         bool error;
2090
2091         asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
2092                       : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
2093                       : "memory");
2094         BUG_ON(error);
2095 }
2096
2097 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
2098 {
2099         int i;
2100
2101         i = __find_msr_index(vmx, msr);
2102         if (i >= 0)
2103                 return &vmx->guest_msrs[i];
2104         return NULL;
2105 }
2106
2107 static void vmcs_clear(struct vmcs *vmcs)
2108 {
2109         u64 phys_addr = __pa(vmcs);
2110         bool error;
2111
2112         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
2113                       : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2114                       : "memory");
2115         if (unlikely(error))
2116                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
2117                        vmcs, phys_addr);
2118 }
2119
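/*
 * VMCLEAR the VMCS (and its shadow VMCS, if one was launched) and mark
 * the loaded_vmcs as not resident on any cpu and not launched.
 */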
2120 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
2121 {
2122         vmcs_clear(loaded_vmcs->vmcs);
2123         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
2124                 vmcs_clear(loaded_vmcs->shadow_vmcs);
2125         loaded_vmcs->cpu = -1;
2126         loaded_vmcs->launched = 0;
2127 }
2128
2129 static void vmcs_load(struct vmcs *vmcs)
2130 {
2131         u64 phys_addr = __pa(vmcs);
2132         bool error;
2133
2134         if (static_branch_unlikely(&enable_evmcs))
2135                 return evmcs_load(phys_addr);
2136
2137         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
2138                       : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
2139                       : "memory");
2140         if (unlikely(error))
2141                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
2142                        vmcs, phys_addr);
2143 }
2144
2145 #ifdef CONFIG_KEXEC_CORE
2146 /*
2147  * This bitmap indicates, per cpu, whether VMCLEAR of loaded VMCSs is
2148  * enabled for the kexec crash path. It is disabled on all cpus by
2149  * default.
2150  */
2151 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
2152
2153 static inline void crash_enable_local_vmclear(int cpu)
2154 {
2155         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
2156 }
2157
2158 static inline void crash_disable_local_vmclear(int cpu)
2159 {
2160         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
2161 }
2162
2163 static inline int crash_local_vmclear_enabled(int cpu)
2164 {
2165         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
2166 }
2167
2168 static void crash_vmclear_local_loaded_vmcss(void)
2169 {
2170         int cpu = raw_smp_processor_id();
2171         struct loaded_vmcs *v;
2172
2173         if (!crash_local_vmclear_enabled(cpu))
2174                 return;
2175
2176         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
2177                             loaded_vmcss_on_cpu_link)
2178                 vmcs_clear(v->vmcs);
2179 }
2180 #else
2181 static inline void crash_enable_local_vmclear(int cpu) { }
2182 static inline void crash_disable_local_vmclear(int cpu) { }
2183 #endif /* CONFIG_KEXEC_CORE */
2184
2185 static void __loaded_vmcs_clear(void *arg)
2186 {
2187         struct loaded_vmcs *loaded_vmcs = arg;
2188         int cpu = raw_smp_processor_id();
2189
2190         if (loaded_vmcs->cpu != cpu)
2191                 return; /* vcpu migration can race with cpu offline */
2192         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
2193                 per_cpu(current_vmcs, cpu) = NULL;
2194         crash_disable_local_vmclear(cpu);
2195         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
2196
2197         /*
2198          * Ensure the update to loaded_vmcs->loaded_vmcss_on_cpu_link is
2199          * ordered before setting loaded_vmcs->cpu to -1, which is done in
2200          * loaded_vmcs_init(). Otherwise another cpu could see cpu == -1
2201          * first and add the vmcs to its percpu list before it is deleted.
2202          */
2203         smp_wmb();
2204
2205         loaded_vmcs_init(loaded_vmcs);
2206         crash_enable_local_vmclear(cpu);
2207 }
2208
2209 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
2210 {
2211         int cpu = loaded_vmcs->cpu;
2212
2213         if (cpu != -1)
2214                 smp_call_function_single(cpu,
2215                          __loaded_vmcs_clear, loaded_vmcs, 1);
2216 }
2217
2218 static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
2219 {
2220         if (vpid == 0)
2221                 return true;
2222
2223         if (cpu_has_vmx_invvpid_individual_addr()) {
2224                 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
2225                 return true;
2226         }
2227
2228         return false;
2229 }
2230
2231 static inline void vpid_sync_vcpu_single(int vpid)
2232 {
2233         if (vpid == 0)
2234                 return;
2235
2236         if (cpu_has_vmx_invvpid_single())
2237                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
2238 }
2239
2240 static inline void vpid_sync_vcpu_global(void)
2241 {
2242         if (cpu_has_vmx_invvpid_global())
2243                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
2244 }
2245
2246 static inline void vpid_sync_context(int vpid)
2247 {
2248         if (cpu_has_vmx_invvpid_single())
2249                 vpid_sync_vcpu_single(vpid);
2250         else
2251                 vpid_sync_vcpu_global();
2252 }
2253
2254 static inline void ept_sync_global(void)
2255 {
2256         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
2257 }
2258
2259 static inline void ept_sync_context(u64 eptp)
2260 {
2261         if (cpu_has_vmx_invept_context())
2262                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
2263         else
2264                 ept_sync_global();
2265 }
2266
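/*
 * VMCS field encodings use bits 14:13 for the field width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 to select the high
 * half of a 64-bit field; the 0x6000/0x6001 masks below test exactly
 * those bits so that mismatched accessors fail at build time.
 */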
2267 static __always_inline void vmcs_check16(unsigned long field)
2268 {
2269         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2270                          "16-bit accessor invalid for 64-bit field");
2271         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2272                          "16-bit accessor invalid for 64-bit high field");
2273         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2274                          "16-bit accessor invalid for 32-bit high field");
2275         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2276                          "16-bit accessor invalid for natural width field");
2277 }
2278
2279 static __always_inline void vmcs_check32(unsigned long field)
2280 {
2281         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2282                          "32-bit accessor invalid for 16-bit field");
2283         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2284                          "32-bit accessor invalid for natural width field");
2285 }
2286
2287 static __always_inline void vmcs_check64(unsigned long field)
2288 {
2289         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2290                          "64-bit accessor invalid for 16-bit field");
2291         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2292                          "64-bit accessor invalid for 64-bit high field");
2293         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2294                          "64-bit accessor invalid for 32-bit field");
2295         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2296                          "64-bit accessor invalid for natural width field");
2297 }
2298
2299 static __always_inline void vmcs_checkl(unsigned long field)
2300 {
2301         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2302                          "Natural width accessor invalid for 16-bit field");
2303         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2304                          "Natural width accessor invalid for 64-bit field");
2305         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2306                          "Natural width accessor invalid for 64-bit high field");
2307         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2308                          "Natural width accessor invalid for 32-bit field");
2309 }
2310
2311 static __always_inline unsigned long __vmcs_readl(unsigned long field)
2312 {
2313         unsigned long value;
2314
2315         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
2316                       : "=a"(value) : "d"(field) : "cc");
2317         return value;
2318 }
2319
2320 static __always_inline u16 vmcs_read16(unsigned long field)
2321 {
2322         vmcs_check16(field);
2323         if (static_branch_unlikely(&enable_evmcs))
2324                 return evmcs_read16(field);
2325         return __vmcs_readl(field);
2326 }
2327
2328 static __always_inline u32 vmcs_read32(unsigned long field)
2329 {
2330         vmcs_check32(field);
2331         if (static_branch_unlikely(&enable_evmcs))
2332                 return evmcs_read32(field);
2333         return __vmcs_readl(field);
2334 }
2335
2336 static __always_inline u64 vmcs_read64(unsigned long field)
2337 {
2338         vmcs_check64(field);
2339         if (static_branch_unlikely(&enable_evmcs))
2340                 return evmcs_read64(field);
2341 #ifdef CONFIG_X86_64
2342         return __vmcs_readl(field);
2343 #else
2344         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
2345 #endif
2346 }
2347
2348 static __always_inline unsigned long vmcs_readl(unsigned long field)
2349 {
2350         vmcs_checkl(field);
2351         if (static_branch_unlikely(&enable_evmcs))
2352                 return evmcs_read64(field);
2353         return __vmcs_readl(field);
2354 }
2355
2356 static noinline void vmwrite_error(unsigned long field, unsigned long value)
2357 {
2358         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
2359                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
2360         dump_stack();
2361 }
2362
2363 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
2364 {
2365         bool error;
2366
2367         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
2368                       : CC_OUT(na) (error) : "a"(value), "d"(field));
2369         if (unlikely(error))
2370                 vmwrite_error(field, value);
2371 }
2372
2373 static __always_inline void vmcs_write16(unsigned long field, u16 value)
2374 {
2375         vmcs_check16(field);
2376         if (static_branch_unlikely(&enable_evmcs))
2377                 return evmcs_write16(field, value);
2378
2379         __vmcs_writel(field, value);
2380 }
2381
2382 static __always_inline void vmcs_write32(unsigned long field, u32 value)
2383 {
2384         vmcs_check32(field);
2385         if (static_branch_unlikely(&enable_evmcs))
2386                 return evmcs_write32(field, value);
2387
2388         __vmcs_writel(field, value);
2389 }
2390
2391 static __always_inline void vmcs_write64(unsigned long field, u64 value)
2392 {
2393         vmcs_check64(field);
2394         if (static_branch_unlikely(&enable_evmcs))
2395                 return evmcs_write64(field, value);
2396
2397         __vmcs_writel(field, value);
2398 #ifndef CONFIG_X86_64
2399         asm volatile ("");
2400         __vmcs_writel(field+1, value >> 32);
2401 #endif
2402 }
2403
2404 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
2405 {
2406         vmcs_checkl(field);
2407         if (static_branch_unlikely(&enable_evmcs))
2408                 return evmcs_write64(field, value);
2409
2410         __vmcs_writel(field, value);
2411 }
2412
2413 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
2414 {
2415         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2416                          "vmcs_clear_bits does not support 64-bit fields");
2417         if (static_branch_unlikely(&enable_evmcs))
2418                 return evmcs_write32(field, evmcs_read32(field) & ~mask);
2419
2420         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
2421 }
2422
2423 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
2424 {
2425         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2426                          "vmcs_set_bits does not support 64-bit fields");
2427         if (static_branch_unlikely(&enable_evmcs))
2428                 return evmcs_write32(field, evmcs_read32(field) | mask);
2429
2430         __vmcs_writel(field, __vmcs_readl(field) | mask);
2431 }
2432
2433 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
2434 {
2435         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
2436 }
2437
2438 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
2439 {
2440         vmcs_write32(VM_ENTRY_CONTROLS, val);
2441         vmx->vm_entry_controls_shadow = val;
2442 }
2443
2444 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
2445 {
2446         if (vmx->vm_entry_controls_shadow != val)
2447                 vm_entry_controls_init(vmx, val);
2448 }
2449
2450 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
2451 {
2452         return vmx->vm_entry_controls_shadow;
2453 }
2454
2455
2456 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2457 {
2458         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
2459 }
2460
2461 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2462 {
2463         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
2464 }
2465
2466 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
2467 {
2468         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
2469 }
2470
2471 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
2472 {
2473         vmcs_write32(VM_EXIT_CONTROLS, val);
2474         vmx->vm_exit_controls_shadow = val;
2475 }
2476
2477 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2478 {
2479         if (vmx->vm_exit_controls_shadow != val)
2480                 vm_exit_controls_init(vmx, val);
2481 }
2482
2483 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2484 {
2485         return vmx->vm_exit_controls_shadow;
2486 }
2487
2488
2489 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2490 {
2491         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2492 }
2493
2494 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2495 {
2496         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2497 }
2498
2499 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2500 {
2501         vmx->segment_cache.bitmask = 0;
2502 }
2503
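/*
 * Returns whether (seg, field) was already cached and marks it as cached.
 * The whole cache is invalidated here whenever VCPU_EXREG_SEGMENTS is not
 * marked available in regs_avail.
 */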
2504 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2505                                        unsigned field)
2506 {
2507         bool ret;
2508         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2509
2510         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2511                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2512                 vmx->segment_cache.bitmask = 0;
2513         }
2514         ret = vmx->segment_cache.bitmask & mask;
2515         vmx->segment_cache.bitmask |= mask;
2516         return ret;
2517 }
2518
2519 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2520 {
2521         u16 *p = &vmx->segment_cache.seg[seg].selector;
2522
2523         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2524                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2525         return *p;
2526 }
2527
2528 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2529 {
2530         ulong *p = &vmx->segment_cache.seg[seg].base;
2531
2532         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2533                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2534         return *p;
2535 }
2536
2537 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2538 {
2539         u32 *p = &vmx->segment_cache.seg[seg].limit;
2540
2541         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2542                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2543         return *p;
2544 }
2545
2546 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2547 {
2548         u32 *p = &vmx->segment_cache.seg[seg].ar;
2549
2550         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2551                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2552         return *p;
2553 }
2554
2555 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2556 {
2557         u32 eb;
2558
2559         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
2560              (1u << DB_VECTOR) | (1u << AC_VECTOR);
2561         /*
2562          * Guest access to VMware backdoor ports could legitimately
2563          * trigger #GP because of the TSS I/O permission bitmap.
2564          * We intercept those #GP exceptions and allow the access anyway,
2565          * as VMware does.
2566          */
2567         if (enable_vmware_backdoor)
2568                 eb |= (1u << GP_VECTOR);
2569         if ((vcpu->guest_debug &
2570              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2571             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2572                 eb |= 1u << BP_VECTOR;
2573         if (to_vmx(vcpu)->rmode.vm86_active)
2574                 eb = ~0;
2575         if (enable_ept)
2576                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
2577
2578         /* When we are running a nested L2 guest and L1 has specified an
2579          * exception bitmap for it, we must trap the same exceptions and pass
2580          * them to L1. When running L2, we only handle the exceptions
2581          * specified above if L1 did not want them.
2582          */
2583         if (is_guest_mode(vcpu))
2584                 eb |= get_vmcs12(vcpu)->exception_bitmap;
2585
2586         vmcs_write32(EXCEPTION_BITMAP, eb);
2587 }
2588
2589 /*
2590  * Check whether writes to the MSR are intercepted in the currently loaded MSR bitmap.
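 * The VMX MSR bitmap is a 4K page: write bitmaps live at offset 0x800 for
 * low MSRs (0x00000000-0x00001fff) and 0xc00 for high MSRs
 * (0xc0000000-0xc0001fff), which is what the offsets below index into.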
2591  */
2592 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2593 {
2594         unsigned long *msr_bitmap;
2595         int f = sizeof(unsigned long);
2596
2597         if (!cpu_has_vmx_msr_bitmap())
2598                 return true;
2599
2600         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2601
2602         if (msr <= 0x1fff) {
2603                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2604         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2605                 msr &= 0x1fff;
2606                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2607         }
2608
2609         return true;
2610 }
2611
2612 /*
2613  * Check whether writes to the MSR are intercepted in the L01 MSR bitmap.
2614  */
2615 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2616 {
2617         unsigned long *msr_bitmap;
2618         int f = sizeof(unsigned long);
2619
2620         if (!cpu_has_vmx_msr_bitmap())
2621                 return true;
2622
2623         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2624
2625         if (msr <= 0x1fff) {
2626                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2627         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2628                 msr &= 0x1fff;
2629                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2630         }
2631
2632         return true;
2633 }
2634
2635 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2636                 unsigned long entry, unsigned long exit)
2637 {
2638         vm_entry_controls_clearbit(vmx, entry);
2639         vm_exit_controls_clearbit(vmx, exit);
2640 }
2641
2642 static int find_msr(struct vmx_msrs *m, unsigned int msr)
2643 {
2644         unsigned int i;
2645
2646         for (i = 0; i < m->nr; ++i) {
2647                 if (m->val[i].index == msr)
2648                         return i;
2649         }
2650         return -ENOENT;
2651 }
2652
2653 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2654 {
2655         int i;
2656         struct msr_autoload *m = &vmx->msr_autoload;
2657
2658         switch (msr) {
2659         case MSR_EFER:
2660                 if (cpu_has_load_ia32_efer) {
2661                         clear_atomic_switch_msr_special(vmx,
2662                                         VM_ENTRY_LOAD_IA32_EFER,
2663                                         VM_EXIT_LOAD_IA32_EFER);
2664                         return;
2665                 }
2666                 break;
2667         case MSR_CORE_PERF_GLOBAL_CTRL:
2668                 if (cpu_has_load_perf_global_ctrl) {
2669                         clear_atomic_switch_msr_special(vmx,
2670                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2671                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2672                         return;
2673                 }
2674                 break;
2675         }
2676         i = find_msr(&m->guest, msr);
2677         if (i < 0)
2678                 goto skip_guest;
2679         --m->guest.nr;
2680         m->guest.val[i] = m->guest.val[m->guest.nr];
2681         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2682
2683 skip_guest:
2684         i = find_msr(&m->host, msr);
2685         if (i < 0)
2686                 return;
2687
2688         --m->host.nr;
2689         m->host.val[i] = m->host.val[m->host.nr];
2690         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2691 }
2692
2693 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2694                 unsigned long entry, unsigned long exit,
2695                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2696                 u64 guest_val, u64 host_val)
2697 {
2698         vmcs_write64(guest_val_vmcs, guest_val);
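        /*
         * HOST_IA32_EFER is constant for the life of the VMCS and is
         * written once by vmx_set_constant_host_state(), so don't
         * rewrite it here.
         */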
2699         if (host_val_vmcs != HOST_IA32_EFER)
2700                 vmcs_write64(host_val_vmcs, host_val);
2701         vm_entry_controls_setbit(vmx, entry);
2702         vm_exit_controls_setbit(vmx, exit);
2703 }
2704
2705 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
2706                                   u64 guest_val, u64 host_val, bool entry_only)
2707 {
2708         int i, j = 0;
2709         struct msr_autoload *m = &vmx->msr_autoload;
2710
2711         switch (msr) {
2712         case MSR_EFER:
2713                 if (cpu_has_load_ia32_efer) {
2714                         add_atomic_switch_msr_special(vmx,
2715                                         VM_ENTRY_LOAD_IA32_EFER,
2716                                         VM_EXIT_LOAD_IA32_EFER,
2717                                         GUEST_IA32_EFER,
2718                                         HOST_IA32_EFER,
2719                                         guest_val, host_val);
2720                         return;
2721                 }
2722                 break;
2723         case MSR_CORE_PERF_GLOBAL_CTRL:
2724                 if (cpu_has_load_perf_global_ctrl) {
2725                         add_atomic_switch_msr_special(vmx,
2726                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2727                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2728                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2729                                         HOST_IA32_PERF_GLOBAL_CTRL,
2730                                         guest_val, host_val);
2731                         return;
2732                 }
2733                 break;
2734         case MSR_IA32_PEBS_ENABLE:
2735                 /* PEBS needs a quiescent period after being disabled (to write
2736                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2737                  * provide that period, so a CPU could write the host's record
2738                  * into the guest's memory.
2739                  */
2740                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2741         }
2742
2743         i = find_msr(&m->guest, msr);
2744         if (!entry_only)
2745                 j = find_msr(&m->host, msr);
2746
2747         if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
2748                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2749                                 "Can't add msr %x\n", msr);
2750                 return;
2751         }
2752         if (i < 0) {
2753                 i = m->guest.nr++;
2754                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
2755         }
2756         m->guest.val[i].index = msr;
2757         m->guest.val[i].value = guest_val;
2758
2759         if (entry_only)
2760                 return;
2761
2762         if (j < 0) {
2763                 j = m->host.nr++;
2764                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
2765         }
2766         m->host.val[j].index = msr;
2767         m->host.val[j].value = host_val;
2768 }
2769
2770 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2771 {
2772         u64 guest_efer = vmx->vcpu.arch.efer;
2773         u64 ignore_bits = 0;
2774
2775         if (!enable_ept) {
2776                 /*
2777                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2778                  * host CPUID is more efficient than testing guest CPUID
2779                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2780                  */
2781                 if (boot_cpu_has(X86_FEATURE_SMEP))
2782                         guest_efer |= EFER_NX;
2783                 else if (!(guest_efer & EFER_NX))
2784                         ignore_bits |= EFER_NX;
2785         }
2786
2787         /*
2788          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2789          */
2790         ignore_bits |= EFER_SCE;
2791 #ifdef CONFIG_X86_64
2792         ignore_bits |= EFER_LMA | EFER_LME;
2793         /* SCE is meaningful only in long mode on Intel */
2794         if (guest_efer & EFER_LMA)
2795                 ignore_bits &= ~(u64)EFER_SCE;
2796 #endif
2797
2798         /*
2799          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2800          * On CPUs that support "load IA32_EFER", always switch EFER
2801          * atomically, since it's faster than switching it manually.
2802          */
2803         if (cpu_has_load_ia32_efer ||
2804             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2805                 if (!(guest_efer & EFER_LMA))
2806                         guest_efer &= ~EFER_LME;
2807                 if (guest_efer != host_efer)
2808                         add_atomic_switch_msr(vmx, MSR_EFER,
2809                                               guest_efer, host_efer, false);
2810                 else
2811                         clear_atomic_switch_msr(vmx, MSR_EFER);
2812                 return false;
2813         } else {
2814                 clear_atomic_switch_msr(vmx, MSR_EFER);
2815
2816                 guest_efer &= ~ignore_bits;
2817                 guest_efer |= host_efer & ignore_bits;
2818
2819                 vmx->guest_msrs[efer_offset].data = guest_efer;
2820                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2821
2822                 return true;
2823         }
2824 }
2825
2826 #ifdef CONFIG_X86_32
2827 /*
2828  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2829  * VMCS rather than the segment table.  KVM uses this helper to figure
2830  * out the current bases to poke them into the VMCS before entry.
2831  */
2832 static unsigned long segment_base(u16 selector)
2833 {
2834         struct desc_struct *table;
2835         unsigned long v;
2836
2837         if (!(selector & ~SEGMENT_RPL_MASK))
2838                 return 0;
2839
2840         table = get_current_gdt_ro();
2841
2842         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2843                 u16 ldt_selector = kvm_read_ldt();
2844
2845                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2846                         return 0;
2847
2848                 table = (struct desc_struct *)segment_base(ldt_selector);
2849         }
2850         v = get_desc_base(&table[selector >> 3]);
2851         return v;
2852 }
2853 #endif
2854
2855 static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2856 {
2857         struct vcpu_vmx *vmx = to_vmx(vcpu);
2858         struct vmcs_host_state *host_state;
2859 #ifdef CONFIG_X86_64
2860         int cpu = raw_smp_processor_id();
2861 #endif
2862         unsigned long fs_base, gs_base;
2863         u16 fs_sel, gs_sel;
2864         int i;
2865
2866         vmx->req_immediate_exit = false;
2867
2868         if (vmx->loaded_cpu_state)
2869                 return;
2870
2871         vmx->loaded_cpu_state = vmx->loaded_vmcs;
2872         host_state = &vmx->loaded_cpu_state->host_state;
2873
2874         /*
2875          * Set host fs and gs selectors.  Unfortunately, SDM section 22.2.3
2876          * does not allow segment selectors with cpl > 0 or ti == 1.
2877          */
2878         host_state->ldt_sel = kvm_read_ldt();
2879
2880 #ifdef CONFIG_X86_64
2881         savesegment(ds, host_state->ds_sel);
2882         savesegment(es, host_state->es_sel);
2883
2884         gs_base = cpu_kernelmode_gs_base(cpu);
2885         if (likely(is_64bit_mm(current->mm))) {
2886                 save_fsgs_for_kvm();
2887                 fs_sel = current->thread.fsindex;
2888                 gs_sel = current->thread.gsindex;
2889                 fs_base = current->thread.fsbase;
2890                 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
2891         } else {
2892                 savesegment(fs, fs_sel);
2893                 savesegment(gs, gs_sel);
2894                 fs_base = read_msr(MSR_FS_BASE);
2895                 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
2896         }
2897
2898         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2899 #else
2900         savesegment(fs, fs_sel);
2901         savesegment(gs, gs_sel);
2902         fs_base = segment_base(fs_sel);
2903         gs_base = segment_base(gs_sel);
2904 #endif
2905
2906         if (unlikely(fs_sel != host_state->fs_sel)) {
2907                 if (!(fs_sel & 7))
2908                         vmcs_write16(HOST_FS_SELECTOR, fs_sel);
2909                 else
2910                         vmcs_write16(HOST_FS_SELECTOR, 0);
2911                 host_state->fs_sel = fs_sel;
2912         }
2913         if (unlikely(gs_sel != host_state->gs_sel)) {
2914                 if (!(gs_sel & 7))
2915                         vmcs_write16(HOST_GS_SELECTOR, gs_sel);
2916                 else
2917                         vmcs_write16(HOST_GS_SELECTOR, 0);
2918                 host_state->gs_sel = gs_sel;
2919         }
2920         if (unlikely(fs_base != host_state->fs_base)) {
2921                 vmcs_writel(HOST_FS_BASE, fs_base);
2922                 host_state->fs_base = fs_base;
2923         }
2924         if (unlikely(gs_base != host_state->gs_base)) {
2925                 vmcs_writel(HOST_GS_BASE, gs_base);
2926                 host_state->gs_base = gs_base;
2927         }
2928
2929         for (i = 0; i < vmx->save_nmsrs; ++i)
2930                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2931                                    vmx->guest_msrs[i].data,
2932                                    vmx->guest_msrs[i].mask);
2933 }
2934
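/*
 * Undo vmx_prepare_switch_to_guest(): save the guest's KERNEL_GS_BASE and
 * restore the host segment registers and MSRs that were clobbered while
 * running the guest.
 */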
2935 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
2936 {
2937         struct vmcs_host_state *host_state;
2938
2939         if (!vmx->loaded_cpu_state)
2940                 return;
2941
2942         WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
2943         host_state = &vmx->loaded_cpu_state->host_state;
2944
2945         ++vmx->vcpu.stat.host_state_reload;
2946         vmx->loaded_cpu_state = NULL;
2947
2948 #ifdef CONFIG_X86_64
2949         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2950 #endif
2951         if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
2952                 kvm_load_ldt(host_state->ldt_sel);
2953 #ifdef CONFIG_X86_64
2954                 load_gs_index(host_state->gs_sel);
2955 #else
2956                 loadsegment(gs, host_state->gs_sel);
2957 #endif
2958         }
2959         if (host_state->fs_sel & 7)
2960                 loadsegment(fs, host_state->fs_sel);
2961 #ifdef CONFIG_X86_64
2962         if (unlikely(host_state->ds_sel | host_state->es_sel)) {
2963                 loadsegment(ds, host_state->ds_sel);
2964                 loadsegment(es, host_state->es_sel);
2965         }
2966 #endif
2967         invalidate_tss_limit();
2968 #ifdef CONFIG_X86_64
2969         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2970 #endif
2971         load_fixmap_gdt(raw_smp_processor_id());
2972 }
2973
2974 #ifdef CONFIG_X86_64
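/*
 * While guest state is loaded (vmx->loaded_cpu_state != NULL) the guest's
 * MSR_KERNEL_GS_BASE value is live in the MSR itself, so the accessors
 * below go through the MSR in that case.  Preemption is disabled so the
 * state cannot be switched out in the middle of the access.
 */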
2975 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
2976 {
2977         preempt_disable();
2978         if (vmx->loaded_cpu_state)
2979                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2980         preempt_enable();
2981         return vmx->msr_guest_kernel_gs_base;
2982 }
2983
2984 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
2985 {
2986         preempt_disable();
2987         if (vmx->loaded_cpu_state)
2988                 wrmsrl(MSR_KERNEL_GS_BASE, data);
2989         preempt_enable();
2990         vmx->msr_guest_kernel_gs_base = data;
2991 }
2992 #endif
2993
2994 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2995 {
2996         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2997         struct pi_desc old, new;
2998         unsigned int dest;
2999
3000         /*
3001          * In case of hot-plug or hot-unplug, we may have to undo
3002          * vmx_vcpu_pi_put even if there is no assigned device.  And we
3003          * always keep PI.NDST up to date for simplicity: it makes the
3004          * code easier, and CPU migration is not a fast path.
3005          */
3006         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
3007                 return;
3008
3009         /*
3010          * First handle the simple case where no cmpxchg is necessary; just
3011          * allow posting non-urgent interrupts.
3012          *
3013          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
3014          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
3015          * expects the VCPU to be on the blocked_vcpu_list that matches
3016          * PI.NDST.
3017          */
3018         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
3019             vcpu->cpu == cpu) {
3020                 pi_clear_sn(pi_desc);
3021                 return;
3022         }
3023
3024         /* The full case.  */
3025         do {
3026                 old.control = new.control = pi_desc->control;
3027
3028                 dest = cpu_physical_id(cpu);
3029
3030                 if (x2apic_enabled())
3031                         new.ndst = dest;
3032                 else
3033                         new.ndst = (dest << 8) & 0xFF00;
3034
3035                 new.sn = 0;
3036         } while (cmpxchg64(&pi_desc->control, old.control,
3037                            new.control) != old.control);
3038 }
3039
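/*
 * Write the vCPU's TSC scaling ratio into the VMCS and cache it, so that
 * vmx_vcpu_load() can skip the VMWRITE when the ratio has not changed.
 */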
3040 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
3041 {
3042         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
3043         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
3044 }
3045
3046 /*
3047  * Switches to the specified vcpu until a matching vcpu_put().  Assumes the
3048  * vcpu mutex is already taken.
3049  */
3050 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3051 {
3052         struct vcpu_vmx *vmx = to_vmx(vcpu);
3053         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
3054
3055         if (!already_loaded) {
3056                 loaded_vmcs_clear(vmx->loaded_vmcs);
3057                 local_irq_disable();
3058                 crash_disable_local_vmclear(cpu);
3059
3060                 /*
3061                  * Read loaded_vmcs->cpu should be before fetching
3062                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
3063                  * See the comments in __loaded_vmcs_clear().
3064                  */
3065                 smp_rmb();
3066
3067                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
3068                          &per_cpu(loaded_vmcss_on_cpu, cpu));
3069                 crash_enable_local_vmclear(cpu);
3070                 local_irq_enable();
3071         }
3072
3073         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
3074                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
3075                 vmcs_load(vmx->loaded_vmcs->vmcs);
3076                 indirect_branch_prediction_barrier();
3077         }
3078
3079         if (!already_loaded) {
3080                 void *gdt = get_current_gdt_ro();
3081                 unsigned long sysenter_esp;
3082
3083                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3084
3085                 /*
3086                  * Linux uses per-cpu TSS and GDT, so set these when switching
3087                  * processors.  See 22.2.4.
3088                  */
3089                 vmcs_writel(HOST_TR_BASE,
3090                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
3091                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
3092
3093                 /*
3094                  * A VM exit changes the host TR limit to 0x67.  This is
3095                  * okay, since 0x67 covers everything except the IO bitmap,
3096                  * and we have code to handle the IO bitmap being lost
3097                  * after a VM exit.
3098                  */
3099                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
3100
3101                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
3102                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
3103
3104                 vmx->loaded_vmcs->cpu = cpu;
3105         }
3106
3107         /* Setup TSC multiplier */
3108         if (kvm_has_tsc_control &&
3109             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
3110                 decache_tsc_multiplier(vmx);
3111
3112         vmx_vcpu_pi_load(vcpu, cpu);
3113         vmx->host_pkru = read_pkru();
3114         vmx->host_debugctlmsr = get_debugctlmsr();
3115 }
3116
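/*
 * Posted-interrupt counterpart of vcpu_put: if posted interrupts are in
 * use (assigned device, IRQ posting capability and APICv active) and the
 * vCPU is being preempted, set the Suppress Notification (SN) bit so that
 * no notification events are sent while it is scheduled out.
 */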
3117 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
3118 {
3119         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
3120
3121         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
3122                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
3123                 !kvm_vcpu_apicv_active(vcpu))
3124                 return;
3125
3126         /* Set SN when the vCPU is preempted */
3127         if (vcpu->preempted)
3128                 pi_set_sn(pi_desc);
3129 }
3130
3131 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
3132 {
3133         vmx_vcpu_pi_put(vcpu);
3134
3135         vmx_prepare_switch_to_host(to_vmx(vcpu));
3136 }
3137
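/*
 * True when the current guest state is not valid for native VMX execution
 * and emulating invalid guest state is enabled, i.e. the guest has to be
 * run through the emulator instead.
 */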
3138 static bool emulation_required(struct kvm_vcpu *vcpu)
3139 {
3140         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3141 }
3142
3143 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
3144
3145 /*
3146  * Return the cr0 value that a nested guest would read. This is a combination
3147  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
3148  * its hypervisor (cr0_read_shadow).
3149  */
3150 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
3151 {
3152         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
3153                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
3154 }
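/* Likewise for cr4: combine guest_cr4 with the bits shadowed by L1. */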
3155 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
3156 {
3157         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
3158                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
3159 }
3160
3161 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
3162 {
3163         unsigned long rflags, save_rflags;
3164
3165         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
3166                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3167                 rflags = vmcs_readl(GUEST_RFLAGS);
3168                 if (to_vmx(vcpu)->rmode.vm86_active) {
3169                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3170                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
3171                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3172                 }
3173                 to_vmx(vcpu)->rflags = rflags;
3174         }
3175         return to_vmx(vcpu)->rflags;
3176 }
3177
3178 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
3179 {
3180         unsigned long old_rflags = vmx_get_rflags(vcpu);
3181
3182         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
3183         to_vmx(vcpu)->rflags = rflags;
3184         if (to_vmx(vcpu)->rmode.vm86_active) {
3185                 to_vmx(vcpu)->rmode.save_rflags = rflags;
3186                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3187         }
3188         vmcs_writel(GUEST_RFLAGS, rflags);
3189
3190         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
3191                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
3192 }
3193
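/*
 * Translate the STI / MOV-SS blocking bits of the guest interruptibility
 * state into KVM's generic interrupt-shadow flags.
 */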
3194 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
3195 {
3196         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3197         int ret = 0;
3198
3199         if (interruptibility & GUEST_INTR_STATE_STI)
3200                 ret |= KVM_X86_SHADOW_INT_STI;
3201         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
3202                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
3203
3204         return ret;
3205 }
3206
3207 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
3208 {
3209         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3210         u32 interruptibility = interruptibility_old;
3211
3212         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
3213
3214         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
3215                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
3216         else if (mask & KVM_X86_SHADOW_INT_STI)
3217                 interruptibility |= GUEST_INTR_STATE_STI;
3218
3219                 if (interruptibility != interruptibility_old)
3220                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
3221 }
3222
3223 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
3224 {
3225         unsigned long rip;
3226
3227         rip = kvm_rip_read(vcpu);
3228         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3229         kvm_rip_write(vcpu, rip);
3230
3231         /* skipping an emulated instruction also counts */
3232         vmx_set_interrupt_shadow(vcpu, 0);
3233 }
3234
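/*
 * Reflect a pending exception to L1: build the VM-exit interruption
 * information (vector, type, error code and NMI unblocking) from
 * vcpu->arch.exception and trigger an EXCEPTION_NMI VM-exit with the
 * given exit qualification.
 */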
3235 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3236                                                unsigned long exit_qual)
3237 {
3238         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3239         unsigned int nr = vcpu->arch.exception.nr;
3240         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3241
3242         if (vcpu->arch.exception.has_error_code) {
3243                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3244                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3245         }
3246
3247         if (kvm_exception_is_soft(nr))
3248                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3249         else
3250                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3251
3252         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3253             vmx_get_nmi_mask(vcpu))
3254                 intr_info |= INTR_INFO_UNBLOCK_NMI;
3255
3256         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3257 }
3258
3259 /*
3260  * KVM wants to inject, back into the guest, page faults that it intercepted.
3261  * In a nested guest, this checks whether to deliver them to L2 or exit to L1.
3262  */
3263 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
3264 {
3265         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3266         unsigned int nr = vcpu->arch.exception.nr;
3267
3268         if (nr == PF_VECTOR) {
3269                 if (vcpu->arch.exception.nested_apf) {
3270                         *exit_qual = vcpu->arch.apf.nested_apf_token;
3271                         return 1;
3272                 }
3273                 /*
3274                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
3275                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
3276                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
3277                  * can be written only when inject_pending_event runs.  This should be
3278                  * conditional on a new capability---if the capability is disabled,
3279                  * kvm_multiple_exception would write the ancillary information to
3280                  * CR2 or DR6, for backwards ABI-compatibility.
3281                  */
3282                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
3283                                                     vcpu->arch.exception.error_code)) {
3284                         *exit_qual = vcpu->arch.cr2;
3285                         return 1;
3286                 }
3287         } else {
3288                 if (vmcs12->exception_bitmap & (1u << nr)) {
3289                         if (nr == DB_VECTOR) {
3290                                 *exit_qual = vcpu->arch.dr6;
3291                                 *exit_qual &= ~(DR6_FIXED_1 | DR6_BT);
3292                                 *exit_qual ^= DR6_RTM;
3293                         } else {
3294                                 *exit_qual = 0;
3295                         }
3296                         return 1;
3297                 }
3298         }
3299
3300         return 0;
3301 }
3302
3303 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
3304 {
3305         /*
3306          * Ensure that we clear the HLT state in the VMCS.  We don't need to
3307          * explicitly skip the instruction because if the HLT state is set,
3308          * then the instruction is already executing and RIP has already been
3309          * advanced.
3310          */
3311         if (kvm_hlt_in_guest(vcpu->kvm) &&
3312                         vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
3313                 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
3314 }
3315
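/*
 * Inject the exception pending in vcpu->arch.exception into the guest,
 * either through the VM-entry interruption-information field or, for a
 * vm86-mode guest, by emulating real-mode event delivery.
 */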
3316 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
3317 {
3318         struct vcpu_vmx *vmx = to_vmx(vcpu);
3319         unsigned nr = vcpu->arch.exception.nr;
3320         bool has_error_code = vcpu->arch.exception.has_error_code;
3321         u32 error_code = vcpu->arch.exception.error_code;
3322         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3323
3324         if (has_error_code) {
3325                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
3326                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3327         }
3328
3329         if (vmx->rmode.vm86_active) {
3330                 int inc_eip = 0;
3331                 if (kvm_exception_is_soft(nr))
3332                         inc_eip = vcpu->arch.event_exit_inst_len;
3333                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
3334                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3335                 return;
3336         }
3337
3338         WARN_ON_ONCE(vmx->emulation_required);
3339
3340         if (kvm_exception_is_soft(nr)) {
3341                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3342                              vmx->vcpu.arch.event_exit_inst_len);
3343                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3344         } else
3345                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3346
3347         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
3348
3349         vmx_clear_hlt(vcpu);
3350 }
3351
3352 static bool vmx_rdtscp_supported(void)
3353 {
3354         return cpu_has_vmx_rdtscp();
3355 }
3356
3357 static bool vmx_invpcid_supported(void)
3358 {
3359         return cpu_has_vmx_invpcid();
3360 }
3361
3362 /*
3363  * Swap MSR entry in host/guest MSR entry array.
3364  */
3365 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
3366 {
3367         struct shared_msr_entry tmp;
3368
3369         tmp = vmx->guest_msrs[to];
3370         vmx->guest_msrs[to] = vmx->guest_msrs[from];
3371         vmx->guest_msrs[from] = tmp;
3372 }
3373
3374 /*
3375  * Set up the vmcs to automatically save and restore system
3376  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
3377  * mode, as fiddling with msrs is very expensive.
3378  */
3379 static void setup_msrs(struct vcpu_vmx *vmx)
3380 {
3381         int save_nmsrs, index;
3382
3383         save_nmsrs = 0;
3384 #ifdef CONFIG_X86_64
3385         if (is_long_mode(&vmx->vcpu)) {
3386                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
3387                 if (index >= 0)
3388                         move_msr_up(vmx, index, save_nmsrs++);
3389                 index = __find_msr_index(vmx, MSR_LSTAR);
3390                 if (index >= 0)
3391                         move_msr_up(vmx, index, save_nmsrs++);
3392                 index = __find_msr_index(vmx, MSR_CSTAR);
3393                 if (index >= 0)
3394                         move_msr_up(vmx, index, save_nmsrs++);
3395                 index = __find_msr_index(vmx, MSR_TSC_AUX);
3396                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
3397                         move_msr_up(vmx, index, save_nmsrs++);
3398                 /*
3399                  * MSR_STAR is only needed on long mode guests, and only
3400                  * if efer.sce is enabled.
3401                  */
3402                 index = __find_msr_index(vmx, MSR_STAR);
3403                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
3404                         move_msr_up(vmx, index, save_nmsrs++);
3405         }
3406 #endif
3407         index = __find_msr_index(vmx, MSR_EFER);
3408         if (index >= 0 && update_transition_efer(vmx, index))
3409                 move_msr_up(vmx, index, save_nmsrs++);
3410
3411         vmx->save_nmsrs = save_nmsrs;
3412
3413         if (cpu_has_vmx_msr_bitmap())
3414                 vmx_update_msr_bitmap(&vmx->vcpu);
3415 }
3416
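/*
 * Return the TSC offset as L1 sees it.  While L2 runs with TSC offsetting
 * enabled, vcpu->arch.tsc_offset holds the combined L1+L2 offset, so
 * vmcs12->tsc_offset has to be subtracted out.
 */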
3417 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
3418 {
3419         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3420
3421         if (is_guest_mode(vcpu) &&
3422             (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
3423                 return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
3424
3425         return vcpu->arch.tsc_offset;
3426 }
3427
3428 /*
3429  * Writes 'offset' into the guest's TSC-offset field in the VMCS.
3430  */
3431 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
3432 {
3433         if (is_guest_mode(vcpu)) {
3434                 /*
3435                  * We're here if L1 chose not to trap WRMSR to TSC. According
3436                  * to the spec, this should set L1's TSC; the offset that L1
3437                  * set for L2 remains unchanged, and still needs to be added
3438                  * to the newly set TSC to get L2's TSC.
3439                  */
3440                 struct vmcs12 *vmcs12;
3441                 /* recalculate vmcs02.TSC_OFFSET: */
3442                 vmcs12 = get_vmcs12(vcpu);
3443                 vmcs_write64(TSC_OFFSET, offset +
3444                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
3445                          vmcs12->tsc_offset : 0));
3446         } else {
3447                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
3448                                            vmcs_read64(TSC_OFFSET), offset);
3449                 vmcs_write64(TSC_OFFSET, offset);
3450         }
3451 }
3452
3453 /*
3454  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
3455  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
3456  * all guests if the "nested" module option is off, and can also be disabled
3457  * for a single guest by disabling its VMX cpuid bit.
3458  */
3459 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
3460 {
3461         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
3462 }
3463
3464 /*
3465  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
3466  * returned for the various VMX controls MSRs when nested VMX is enabled.
3467  * The same values should also be used to verify that vmcs12 control fields are
3468  * valid during nested entry from L1 to L2.
3469  * Each of these control msrs has a low and high 32-bit half: A low bit is on
3470  * if the corresponding bit in the (32-bit) control field *must* be on, and a
3471  * bit in the high half is on if the corresponding bit in the control field
3472  * may be on. See also vmx_control_verify().
3473  */
3474 static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3475 {
3476         if (!nested) {
3477                 memset(msrs, 0, sizeof(*msrs));
3478                 return;
3479         }
3480
3481         /*
3482          * Note that as a general rule, the high half of the MSRs (bits in
3483          * the control fields which may be 1) should be initialized by the
3484          * intersection of the underlying hardware's MSR (i.e., features which
3485          * can be supported) and the list of features we want to expose -
3486          * because they are known to be properly supported in our code.
3487          * Also, usually, the low half of the MSRs (bits which must be 1) can
3488          * be set to 0, meaning that L1 may turn off any of these bits. This is
3489          * safe because if one of these bits is really needed, it will be set in
3490          * vmcs01, and prepare_vmcs02, which bitwise-ORs the control fields of
3491          * vmcs01 and vmcs12, will keep it set in vmcs02 even if L1 cleared it -
3492          * and nested_vmx_exit_reflected() will not pass the related exits to L1.
3493          * These rules have exceptions below.
3494          */
3495
3496         /* pin-based controls */
3497         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
3498                 msrs->pinbased_ctls_low,
3499                 msrs->pinbased_ctls_high);
3500         msrs->pinbased_ctls_low |=
3501                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3502         msrs->pinbased_ctls_high &=
3503                 PIN_BASED_EXT_INTR_MASK |
3504                 PIN_BASED_NMI_EXITING |
3505                 PIN_BASED_VIRTUAL_NMIS |
3506                 (apicv ? PIN_BASED_POSTED_INTR : 0);
3507         msrs->pinbased_ctls_high |=
3508                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3509                 PIN_BASED_VMX_PREEMPTION_TIMER;
3510
3511         /* exit controls */
3512         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
3513                 msrs->exit_ctls_low,
3514                 msrs->exit_ctls_high);
3515         msrs->exit_ctls_low =
3516                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3517
3518         msrs->exit_ctls_high &=
3519 #ifdef CONFIG_X86_64
3520                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
3521 #endif
3522                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
3523         msrs->exit_ctls_high |=
3524                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
3525                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
3526                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
3527
3528         /* We support free control of debug control saving. */
3529         msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3530
3531         /* entry controls */
3532         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
3533                 msrs->entry_ctls_low,
3534                 msrs->entry_ctls_high);
3535         msrs->entry_ctls_low =
3536                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3537         msrs->entry_ctls_high &=
3538 #ifdef CONFIG_X86_64
3539                 VM_ENTRY_IA32E_MODE |
3540 #endif
3541                 VM_ENTRY_LOAD_IA32_PAT;
3542         msrs->entry_ctls_high |=
3543                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3544
3545         /* We support free control of debug control loading. */
3546         msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
3547
3548         /* cpu-based controls */
3549         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
3550                 msrs->procbased_ctls_low,
3551                 msrs->procbased_ctls_high);
3552         msrs->procbased_ctls_low =
3553                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3554         msrs->procbased_ctls_high &=
3555                 CPU_BASED_VIRTUAL_INTR_PENDING |
3556                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
3557                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
3558                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
3559                 CPU_BASED_CR3_STORE_EXITING |
3560 #ifdef CONFIG_X86_64
3561                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
3562 #endif
3563                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
3564                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
3565                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
3566                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
3567                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3568         /*
3569          * We can allow some features even when not supported by the
3570          * hardware. For example, L1 can specify an MSR bitmap - and we
3571          * can use it to avoid exits to L1 - even when L0 runs L2
3572          * without MSR bitmaps.
3573          */
3574         msrs->procbased_ctls_high |=
3575                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
3576                 CPU_BASED_USE_MSR_BITMAPS;
3577
3578         /* We support free control of CR3 access interception. */
3579         msrs->procbased_ctls_low &=
3580                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
3581
3582         /*
3583          * secondary cpu-based controls.  Do not include those that
3584          * depend on CPUID bits, they are added later by vmx_cpuid_update.
3585          */
3586         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
3587                 msrs->secondary_ctls_low,
3588                 msrs->secondary_ctls_high);
3589         msrs->secondary_ctls_low = 0;
3590         msrs->secondary_ctls_high &=
3591                 SECONDARY_EXEC_DESC |
3592                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3593                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3594                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3595                 SECONDARY_EXEC_WBINVD_EXITING;
3596
3597         /*
3598          * We can emulate "VMCS shadowing," even if the hardware
3599          * doesn't support it.
3600          */
3601         msrs->secondary_ctls_high |=
3602                 SECONDARY_EXEC_SHADOW_VMCS;
3603
3604         if (enable_ept) {
3605                 /* nested EPT: emulate EPT also to L1 */
3606                 msrs->secondary_ctls_high |=
3607                         SECONDARY_EXEC_ENABLE_EPT;
3608                 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
3609                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
3610                 if (cpu_has_vmx_ept_execute_only())
3611                         msrs->ept_caps |=
3612                                 VMX_EPT_EXECUTE_ONLY_BIT;
3613                 msrs->ept_caps &= vmx_capability.ept;
3614                 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
3615                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
3616                         VMX_EPT_1GB_PAGE_BIT;
3617                 if (enable_ept_ad_bits) {
3618                         msrs->secondary_ctls_high |=
3619                                 SECONDARY_EXEC_ENABLE_PML;
3620                         msrs->ept_caps |= VMX_EPT_AD_BIT;
3621                 }
3622         }
3623
3624         if (cpu_has_vmx_vmfunc()) {
3625                 msrs->secondary_ctls_high |=
3626                         SECONDARY_EXEC_ENABLE_VMFUNC;
3627                 /*
3628                  * Advertise EPTP switching unconditionally
3629                  * since we emulate it
3630                  */
3631                 if (enable_ept)
3632                         msrs->vmfunc_controls =
3633                                 VMX_VMFUNC_EPTP_SWITCHING;
3634         }
3635
3636         /*
3637          * Old versions of KVM use the single-context version without
3638          * checking for support, so declare that it is supported even
3639          * though it is treated as global context.  The alternative -
3640          * failing the single-context invvpid those guests issue - is worse.
3641          */
3642         if (enable_vpid) {
3643                 msrs->secondary_ctls_high |=
3644                         SECONDARY_EXEC_ENABLE_VPID;
3645                 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
3646                         VMX_VPID_EXTENT_SUPPORTED_MASK;
3647         }
3648
3649         if (enable_unrestricted_guest)
3650                 msrs->secondary_ctls_high |=
3651                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
3652
3653         if (flexpriority_enabled)
3654                 msrs->secondary_ctls_high |=
3655                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3656
3657         /* miscellaneous data */
3658         rdmsr(MSR_IA32_VMX_MISC,
3659                 msrs->misc_low,
3660                 msrs->misc_high);
3661         msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
3662         msrs->misc_low |=
3663                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
3664                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
3665                 VMX_MISC_ACTIVITY_HLT;
3666         msrs->misc_high = 0;
3667
3668         /*
3669          * This MSR reports some information about VMX support. We
3670          * should return information about the VMX we emulate for the
3671          * guest, and the VMCS structure we give it - not about the
3672          * VMX support of the underlying hardware.
3673          */
3674         msrs->basic =
3675                 VMCS12_REVISION |
3676                 VMX_BASIC_TRUE_CTLS |
3677                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
3678                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
3679
3680         if (cpu_has_vmx_basic_inout())
3681                 msrs->basic |= VMX_BASIC_INOUT;
3682
3683         /*
3684          * These MSRs specify bits which the guest must keep fixed on
3685          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
3686          * We picked the standard core2 setting.
3687          */
3688 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
3689 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
3690         msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
3691         msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
3692
3693         /* Bits that are clear in these MSRs must be kept fixed off by the guest. */
3694         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
3695         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
3696
3697         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
3698         msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
3699 }
3700
3701 /*
3702  * if fixed0[i] == 1: val[i] must be 1
3703  * if fixed1[i] == 0: val[i] must be 0
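 * e.g. with fixed0 == 0x1 and fixed1 == 0x3, val == 0x3 is valid while
 *      0x2 (bit 0 clear) and 0x4 (bit 2 set) are not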
3704  */
3705 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
3706 {
3707         return ((val & fixed1) | fixed0) == val;
3708 }
3709
3710 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
3711 {
3712         return fixed_bits_valid(control, low, high);
3713 }
3714
3715 static inline u64 vmx_control_msr(u32 low, u32 high)
3716 {
3717         return low | ((u64)high << 32);
3718 }
3719
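/*
 * Return true if, within @mask, every bit set in @subset is also set in
 * @superset, e.g. is_bitwise_subset(0xC, 0x4, -1ULL) is true while
 * is_bitwise_subset(0xC, 0x2, -1ULL) is false.
 */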
3720 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
3721 {
3722         superset &= mask;
3723         subset &= mask;
3724
3725         return (superset | subset) == superset;
3726 }
3727
3728 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
3729 {
3730         const u64 feature_and_reserved =
3731                 /* feature (except bit 48; see below) */
3732                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
3733                 /* reserved */
3734                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
3735         u64 vmx_basic = vmx->nested.msrs.basic;
3736
3737         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
3738                 return -EINVAL;
3739
3740         /*
3741          * KVM does not emulate a version of VMX that constrains physical
3742          * addresses of VMX structures (e.g. VMCS) to 32-bits.
3743          */
3744         if (data & BIT_ULL(48))
3745                 return -EINVAL;
3746
3747         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
3748             vmx_basic_vmcs_revision_id(data))
3749                 return -EINVAL;
3750
3751         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
3752                 return -EINVAL;
3753
3754         vmx->nested.msrs.basic = data;
3755         return 0;
3756 }
3757
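/*
 * Restore one of the "true" VMX control MSRs from userspace.  The restored
 * value may only be more restrictive than what KVM advertises: must-be-1
 * (low half) bits cannot be cleared and allowed-1 (high half) bits that
 * are not already reported cannot be added.
 */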
3758 static int
3759 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3760 {
3761         u64 supported;
3762         u32 *lowp, *highp;
3763
3764         switch (msr_index) {
3765         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3766                 lowp = &vmx->nested.msrs.pinbased_ctls_low;
3767                 highp = &vmx->nested.msrs.pinbased_ctls_high;
3768                 break;
3769         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3770                 lowp = &vmx->nested.msrs.procbased_ctls_low;
3771                 highp = &vmx->nested.msrs.procbased_ctls_high;
3772                 break;
3773         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3774                 lowp = &vmx->nested.msrs.exit_ctls_low;
3775                 highp = &vmx->nested.msrs.exit_ctls_high;
3776                 break;
3777         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3778                 lowp = &vmx->nested.msrs.entry_ctls_low;
3779                 highp = &vmx->nested.msrs.entry_ctls_high;
3780                 break;
3781         case MSR_IA32_VMX_PROCBASED_CTLS2:
3782                 lowp = &vmx->nested.msrs.secondary_ctls_low;
3783                 highp = &vmx->nested.msrs.secondary_ctls_high;
3784                 break;
3785         default:
3786                 BUG();
3787         }
3788
3789         supported = vmx_control_msr(*lowp, *highp);
3790
3791         /* Check must-be-1 bits are still 1. */
3792         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3793                 return -EINVAL;
3794
3795         /* Check must-be-0 bits are still 0. */
3796         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3797                 return -EINVAL;
3798
3799         *lowp = data;
3800         *highp = data >> 32;
3801         return 0;
3802 }
3803
3804 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3805 {
3806         const u64 feature_and_reserved_bits =
3807                 /* feature */
3808                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3809                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3810                 /* reserved */
3811                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3812         u64 vmx_misc;
3813
3814         vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
3815                                    vmx->nested.msrs.misc_high);
3816
3817         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3818                 return -EINVAL;
3819
3820         if ((vmx->nested.msrs.pinbased_ctls_high &
3821              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3822             vmx_misc_preemption_timer_rate(data) !=
3823             vmx_misc_preemption_timer_rate(vmx_misc))
3824                 return -EINVAL;
3825
3826         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3827                 return -EINVAL;
3828
3829         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3830                 return -EINVAL;
3831
3832         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3833                 return -EINVAL;
3834
3835         vmx->nested.msrs.misc_low = data;
3836         vmx->nested.msrs.misc_high = data >> 32;
3837
3838         /*
3839          * If L1 has read-only VM-exit information fields, use the
3840          * less permissive vmx_vmwrite_bitmap to specify write
3841          * permissions for the shadow VMCS.
3842          */
3843         if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
3844                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
3845
3846         return 0;
3847 }
3848
3849 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3850 {
3851         u64 vmx_ept_vpid_cap;
3852
3853         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
3854                                            vmx->nested.msrs.vpid_caps);
3855
3856         /* Every bit is either reserved or a feature bit. */
3857         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3858                 return -EINVAL;
3859
3860         vmx->nested.msrs.ept_caps = data;
3861         vmx->nested.msrs.vpid_caps = data >> 32;
3862         return 0;
3863 }
3864
3865 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3866 {
3867         u64 *msr;
3868
3869         switch (msr_index) {
3870         case MSR_IA32_VMX_CR0_FIXED0:
3871                 msr = &vmx->nested.msrs.cr0_fixed0;
3872                 break;
3873         case MSR_IA32_VMX_CR4_FIXED0:
3874                 msr = &vmx->nested.msrs.cr4_fixed0;
3875                 break;
3876         default:
3877                 BUG();
3878         }
3879
3880         /*
3881          * Bits that are 1 (i.e. bits which "must be 1" during VMX operation)
3882          * must also be 1 in the restored value.
3883          */
3884         if (!is_bitwise_subset(data, *msr, -1ULL))
3885                 return -EINVAL;
3886
3887         *msr = data;
3888         return 0;
3889 }
3890
3891 /*
3892  * Called when userspace is restoring VMX MSRs.
3893  *
3894  * Returns 0 on success, non-0 otherwise.
3895  */
3896 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3897 {
3898         struct vcpu_vmx *vmx = to_vmx(vcpu);
3899
3900         /*
3901          * Don't allow changes to the VMX capability MSRs while the vCPU
3902          * is in VMX operation.
3903          */
3904         if (vmx->nested.vmxon)
3905                 return -EBUSY;
3906
3907         switch (msr_index) {
3908         case MSR_IA32_VMX_BASIC:
3909                 return vmx_restore_vmx_basic(vmx, data);
3910         case MSR_IA32_VMX_PINBASED_CTLS:
3911         case MSR_IA32_VMX_PROCBASED_CTLS:
3912         case MSR_IA32_VMX_EXIT_CTLS:
3913         case MSR_IA32_VMX_ENTRY_CTLS:
3914                 /*
3915                  * The "non-true" VMX capability MSRs are generated from the
3916                  * "true" MSRs, so we do not support restoring them directly.
3917                  *
3918                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3919                  * should restore the "true" MSRs with the must-be-1 bits
3920                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3921                  * DEFAULT SETTINGS".
3922                  */
3923                 return -EINVAL;
3924         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3925         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3926         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3927         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3928         case MSR_IA32_VMX_PROCBASED_CTLS2:
3929                 return vmx_restore_control_msr(vmx, msr_index, data);
3930         case MSR_IA32_VMX_MISC:
3931                 return vmx_restore_vmx_misc(vmx, data);
3932         case MSR_IA32_VMX_CR0_FIXED0:
3933         case MSR_IA32_VMX_CR4_FIXED0:
3934                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3935         case MSR_IA32_VMX_CR0_FIXED1:
3936         case MSR_IA32_VMX_CR4_FIXED1:
3937                 /*
3938                  * These MSRs are generated based on the vCPU's CPUID, so we
3939                  * do not support restoring them directly.
3940                  */
3941                 return -EINVAL;
3942         case MSR_IA32_VMX_EPT_VPID_CAP:
3943                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3944         case MSR_IA32_VMX_VMCS_ENUM:
3945                 vmx->nested.msrs.vmcs_enum = data;
3946                 return 0;
3947         default:
3948                 /*
3949                  * The rest of the VMX capability MSRs do not support restore.
3950                  */
3951                 return -EINVAL;
3952         }
3953 }
3954
3955 /* Returns 0 on success, non-0 otherwise. */
3956 static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
3957 {
3958         switch (msr_index) {
3959         case MSR_IA32_VMX_BASIC:
3960                 *pdata = msrs->basic;
3961                 break;
3962         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3963         case MSR_IA32_VMX_PINBASED_CTLS:
3964                 *pdata = vmx_control_msr(
3965                         msrs->pinbased_ctls_low,
3966                         msrs->pinbased_ctls_high);
3967                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3968                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3969                 break;
3970         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3971         case MSR_IA32_VMX_PROCBASED_CTLS:
3972                 *pdata = vmx_control_msr(
3973                         msrs->procbased_ctls_low,
3974                         msrs->procbased_ctls_high);
3975                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3976                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3977                 break;
3978         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3979         case MSR_IA32_VMX_EXIT_CTLS:
3980                 *pdata = vmx_control_msr(
3981                         msrs->exit_ctls_low,
3982                         msrs->exit_ctls_high);
3983                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3984                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3985                 break;
3986         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3987         case MSR_IA32_VMX_ENTRY_CTLS:
3988                 *pdata = vmx_control_msr(
3989                         msrs->entry_ctls_low,
3990                         msrs->entry_ctls_high);
3991                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3992                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3993                 break;
3994         case MSR_IA32_VMX_MISC:
3995                 *pdata = vmx_control_msr(
3996                         msrs->misc_low,
3997                         msrs->misc_high);
3998                 break;
3999         case MSR_IA32_VMX_CR0_FIXED0:
4000                 *pdata = msrs->cr0_fixed0;
4001                 break;
4002         case MSR_IA32_VMX_CR0_FIXED1:
4003                 *pdata = msrs->cr0_fixed1;
4004                 break;
4005         case MSR_IA32_VMX_CR4_FIXED0:
4006                 *pdata = msrs->cr4_fixed0;
4007                 break;
4008         case MSR_IA32_VMX_CR4_FIXED1:
4009                 *pdata = msrs->cr4_fixed1;
4010                 break;
4011         case MSR_IA32_VMX_VMCS_ENUM:
4012                 *pdata = msrs->vmcs_enum;
4013                 break;
4014         case MSR_IA32_VMX_PROCBASED_CTLS2:
4015                 *pdata = vmx_control_msr(
4016                         msrs->secondary_ctls_low,
4017                         msrs->secondary_ctls_high);
4018                 break;
4019         case MSR_IA32_VMX_EPT_VPID_CAP:
4020                 *pdata = msrs->ept_caps |
4021                         ((u64)msrs->vpid_caps << 32);
4022                 break;
4023         case MSR_IA32_VMX_VMFUNC:
4024                 *pdata = msrs->vmfunc_controls;
4025                 break;
4026         default:
4027                 return 1;
4028         }
4029
4030         return 0;
4031 }
4032
4033 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
4034                                                  uint64_t val)
4035 {
4036         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
4037
4038         return !(val & ~valid_bits);
4039 }
4040
4041 static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
4042 {
4043         switch (msr->index) {
4044         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4045                 if (!nested)
4046                         return 1;
4047                 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
4048         default:
4049                 return 1;
4050         }
4051
4052         return 0;
4053 }
4054
4055 /*
4056  * Reads an msr value (of 'msr_index') into 'pdata'.
4057  * Returns 0 on success, non-0 otherwise.
4058  * Assumes vcpu_load() was already called.
4059  */
4060 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4061 {
4062         struct vcpu_vmx *vmx = to_vmx(vcpu);
4063         struct shared_msr_entry *msr;
4064
4065         switch (msr_info->index) {
4066 #ifdef CONFIG_X86_64
4067         case MSR_FS_BASE:
4068                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
4069                 break;
4070         case MSR_GS_BASE:
4071                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
4072                 break;
4073         case MSR_KERNEL_GS_BASE:
4074                 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
4075                 break;
4076 #endif
4077         case MSR_EFER:
4078                 return kvm_get_msr_common(vcpu, msr_info);
4079         case MSR_IA32_SPEC_CTRL:
4080                 if (!msr_info->host_initiated &&
4081                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4082                         return 1;
4083
4084                 msr_info->data = to_vmx(vcpu)->spec_ctrl;
4085                 break;
4086         case MSR_IA32_ARCH_CAPABILITIES:
4087                 if (!msr_info->host_initiated &&
4088                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
4089                         return 1;
4090                 msr_info->data = to_vmx(vcpu)->arch_capabilities;
4091                 break;
4092         case MSR_IA32_SYSENTER_CS:
4093                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
4094                 break;
4095         case MSR_IA32_SYSENTER_EIP:
4096                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
4097                 break;
4098         case MSR_IA32_SYSENTER_ESP:
4099                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
4100                 break;
4101         case MSR_IA32_BNDCFGS:
4102                 if (!kvm_mpx_supported() ||
4103                     (!msr_info->host_initiated &&
4104                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
4105                         return 1;
4106                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
4107                 break;
4108         case MSR_IA32_MCG_EXT_CTL:
4109                 if (!msr_info->host_initiated &&
4110                     !(vmx->msr_ia32_feature_control &
4111                       FEATURE_CONTROL_LMCE))
4112                         return 1;
4113                 msr_info->data = vcpu->arch.mcg_ext_ctl;
4114                 break;
4115         case MSR_IA32_FEATURE_CONTROL:
4116                 msr_info->data = vmx->msr_ia32_feature_control;
4117                 break;
4118         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4119                 if (!nested_vmx_allowed(vcpu))
4120                         return 1;
4121                 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
4122                                        &msr_info->data);
4123         case MSR_IA32_XSS:
4124                 if (!vmx_xsaves_supported())
4125                         return 1;
4126                 msr_info->data = vcpu->arch.ia32_xss;
4127                 break;
4128         case MSR_TSC_AUX:
4129                 if (!msr_info->host_initiated &&
4130                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
4131                         return 1;
4132                 /* Otherwise falls through */
4133         default:
4134                 msr = find_msr_entry(vmx, msr_info->index);
4135                 if (msr) {
4136                         msr_info->data = msr->data;
4137                         break;
4138                 }
4139                 return kvm_get_msr_common(vcpu, msr_info);
4140         }
4141
4142         return 0;
4143 }
4144
4145 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
4146
4147 /*
4148  * Writes msr value into the appropriate "register".
4149  * Returns 0 on success, non-0 otherwise.
4150  * Assumes vcpu_load() was already called.
4151  */
4152 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4153 {
4154         struct vcpu_vmx *vmx = to_vmx(vcpu);
4155         struct shared_msr_entry *msr;
4156         int ret = 0;
4157         u32 msr_index = msr_info->index;
4158         u64 data = msr_info->data;
4159
4160         switch (msr_index) {
4161         case MSR_EFER:
4162                 ret = kvm_set_msr_common(vcpu, msr_info);
4163                 break;
4164 #ifdef CONFIG_X86_64
4165         case MSR_FS_BASE:
4166                 vmx_segment_cache_clear(vmx);
4167                 vmcs_writel(GUEST_FS_BASE, data);
4168                 break;
4169         case MSR_GS_BASE:
4170                 vmx_segment_cache_clear(vmx);
4171                 vmcs_writel(GUEST_GS_BASE, data);
4172                 break;
4173         case MSR_KERNEL_GS_BASE:
4174                 vmx_write_guest_kernel_gs_base(vmx, data);
4175                 break;
4176 #endif
4177         case MSR_IA32_SYSENTER_CS:
4178                 vmcs_write32(GUEST_SYSENTER_CS, data);
4179                 break;
4180         case MSR_IA32_SYSENTER_EIP:
4181                 vmcs_writel(GUEST_SYSENTER_EIP, data);
4182                 break;
4183         case MSR_IA32_SYSENTER_ESP:
4184                 vmcs_writel(GUEST_SYSENTER_ESP, data);
4185                 break;
4186         case MSR_IA32_BNDCFGS:
4187                 if (!kvm_mpx_supported() ||
4188                     (!msr_info->host_initiated &&
4189                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
4190                         return 1;
4191                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
4192                     (data & MSR_IA32_BNDCFGS_RSVD))
4193                         return 1;
4194                 vmcs_write64(GUEST_BNDCFGS, data);
4195                 break;
4196         case MSR_IA32_SPEC_CTRL:
4197                 if (!msr_info->host_initiated &&
4198                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4199                         return 1;
4200
4201                 /* The STIBP bit doesn't fault even if it's not advertised */
4202                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
4203                         return 1;
4204
4205                 vmx->spec_ctrl = data;
4206
4207                 if (!data)
4208                         break;
4209
4210                 /*
4211                  * For non-nested:
4212                  * When it's written (to non-zero) for the first time, pass
4213                  * it through.
4214                  *
4215                  * For nested:
4216                  * The handling of the MSR bitmap for L2 guests is done in
4217                  * nested_vmx_merge_msr_bitmap. We should not touch the
4218                  * vmcs02.msr_bitmap here since it gets completely overwritten
4219                  * in the merging. We update the vmcs01 here for L1 as well
4220                  * since it will end up touching the MSR anyway now.
4221                  */
4222                 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
4223                                               MSR_IA32_SPEC_CTRL,
4224                                               MSR_TYPE_RW);
4225                 break;
4226         case MSR_IA32_PRED_CMD:
4227                 if (!msr_info->host_initiated &&
4228                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4229                         return 1;
4230
4231                 if (data & ~PRED_CMD_IBPB)
4232                         return 1;
4233
4234                 if (!data)
4235                         break;
4236
4237                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
4238
4239                 /*
4240                  * For non-nested:
4241                  * When it's written (to non-zero) for the first time, pass
4242                  * it through.
4243                  *
4244                  * For nested:
4245                  * The handling of the MSR bitmap for L2 guests is done in
4246                  * nested_vmx_merge_msr_bitmap. We should not touch the
4247                  * vmcs02.msr_bitmap here since it gets completely overwritten
4248                  * in the merging.
4249                  */
4250                 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
4251                                               MSR_TYPE_W);
4252                 break;
4253         case MSR_IA32_ARCH_CAPABILITIES:
4254                 if (!msr_info->host_initiated)
4255                         return 1;
4256                 vmx->arch_capabilities = data;
4257                 break;
4258         case MSR_IA32_CR_PAT:
4259                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
4260                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
4261                                 return 1;
4262                         vmcs_write64(GUEST_IA32_PAT, data);
4263                         vcpu->arch.pat = data;
4264                         break;
4265                 }
4266                 ret = kvm_set_msr_common(vcpu, msr_info);
4267                 break;
4268         case MSR_IA32_TSC_ADJUST:
4269                 ret = kvm_set_msr_common(vcpu, msr_info);
4270                 break;
4271         case MSR_IA32_MCG_EXT_CTL:
4272                 if ((!msr_info->host_initiated &&
4273                      !(to_vmx(vcpu)->msr_ia32_feature_control &
4274                        FEATURE_CONTROL_LMCE)) ||
4275                     (data & ~MCG_EXT_CTL_LMCE_EN))
4276                         return 1;
4277                 vcpu->arch.mcg_ext_ctl = data;
4278                 break;
4279         case MSR_IA32_FEATURE_CONTROL:
4280                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
4281                     (to_vmx(vcpu)->msr_ia32_feature_control &
4282                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
4283                         return 1;
4284                 vmx->msr_ia32_feature_control = data;
4285                 if (msr_info->host_initiated && data == 0)
4286                         vmx_leave_nested(vcpu);
4287                 break;
4288         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4289                 if (!msr_info->host_initiated)
4290                         return 1; /* they are read-only */
4291                 if (!nested_vmx_allowed(vcpu))
4292                         return 1;
4293                 return vmx_set_vmx_msr(vcpu, msr_index, data);
4294         case MSR_IA32_XSS:
4295                 if (!vmx_xsaves_supported())
4296                         return 1;
4297                 /*
4298                  * As of Skylake, the only bit defined in IA32_XSS is bit 8,
4299                  * and KVM does not support it, so only a value of 0 is allowed.
4300                  */
4301                 if (data != 0)
4302                         return 1;
4303                 vcpu->arch.ia32_xss = data;
4304                 if (vcpu->arch.ia32_xss != host_xss)
4305                         add_atomic_switch_msr(vmx, MSR_IA32_XSS,
4306                                 vcpu->arch.ia32_xss, host_xss, false);
4307                 else
4308                         clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
4309                 break;
4310         case MSR_TSC_AUX:
4311                 if (!msr_info->host_initiated &&
4312                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
4313                         return 1;
4314                 /* Check reserved bits: the upper 32 bits must be zero */
4315                 if ((data >> 32) != 0)
4316                         return 1;
4317                 /* Otherwise falls through */
4318         default:
4319                 msr = find_msr_entry(vmx, msr_index);
4320                 if (msr) {
4321                         u64 old_msr_data = msr->data;
4322                         msr->data = data;
4323                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
4324                                 preempt_disable();
4325                                 ret = kvm_set_shared_msr(msr->index, msr->data,
4326                                                          msr->mask);
4327                                 preempt_enable();
4328                                 if (ret)
4329                                         msr->data = old_msr_data;
4330                         }
4331                         break;
4332                 }
4333                 ret = kvm_set_msr_common(vcpu, msr_info);
4334         }
4335
4336         return ret;
4337 }
4338
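/*
 * Pull a register that is kept in the VMCS (RSP, RIP, or the guest PDPTRs
 * when EPT is in use) into the software register cache and mark it
 * available.
 */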
4339 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
4340 {
4341         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
4342         switch (reg) {
4343         case VCPU_REGS_RSP:
4344                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
4345                 break;
4346         case VCPU_REGS_RIP:
4347                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
4348                 break;
4349         case VCPU_EXREG_PDPTR:
4350                 if (enable_ept)
4351                         ept_save_pdptrs(vcpu);
4352                 break;
4353         default:
4354                 break;
4355         }
4356 }
4357
4358 static __init int cpu_has_kvm_support(void)
4359 {
4360         return cpu_has_vmx();
4361 }
4362
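/*
 * Returns 1 if the BIOS has locked IA32_FEATURE_CONTROL with VMXON disabled
 * for the current launch environment (with or without TXT), 0 otherwise.
 */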
4363 static __init int vmx_disabled_by_bios(void)
4364 {
4365         u64 msr;
4366
4367         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
4368         if (msr & FEATURE_CONTROL_LOCKED) {
4369                 /* launched w/ TXT and VMX disabled */
4370                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
4371                         && tboot_enabled())
4372                         return 1;
4373                 /* launched w/o TXT and VMX only enabled w/ TXT */
4374                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
4375                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
4376                         && !tboot_enabled()) {
4377                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
4378                                 "activate TXT before enabling KVM\n");
4379                         return 1;
4380                 }
4381                 /* launched w/o TXT and VMX disabled */
4382                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
4383                         && !tboot_enabled())
4384                         return 1;
4385         }
4386
4387         return 0;
4388 }
4389
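/*
 * Enter VMX operation on the current CPU: set CR4.VMXE, notify the Intel PT
 * driver and execute VMXON with the given VMXON region physical address.
 */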
4390 static void kvm_cpu_vmxon(u64 addr)
4391 {
4392         cr4_set_bits(X86_CR4_VMXE);
4393         intel_pt_handle_vmx(1);
4394
4395         asm volatile (ASM_VMX_VMXON_RAX
4396                         : : "a"(&addr), "m"(addr)
4397                         : "memory", "cc");
4398 }
4399
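/*
 * Per-CPU enablement: initialize the per-cpu VMCS and blocked-vcpu lists,
 * make sure IA32_FEATURE_CONTROL allows VMXON (setting and locking it if the
 * BIOS left it unlocked), and finally enter VMX operation on this CPU.
 */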
4400 static int hardware_enable(void)
4401 {
4402         int cpu = raw_smp_processor_id();
4403         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
4404         u64 old, test_bits;
4405
4406         if (cr4_read_shadow() & X86_CR4_VMXE)
4407                 return -EBUSY;
4408
4409         /*
4410          * This can happen if we hot-added a CPU but failed to allocate
4411          * VP assist page for it.
4412          */
4413         if (static_branch_unlikely(&enable_evmcs) &&
4414             !hv_get_vp_assist_page(cpu))
4415                 return -EFAULT;
4416
4417         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
4418         INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
4419         spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
4420
4421         /*
4422          * Now we can enable the vmclear operation in kdump
4423          * since the loaded_vmcss_on_cpu list on this cpu
4424          * has been initialized.
4425          *
4426          * Even though the cpu is not in VMX operation yet, it is
4427          * safe to enable the vmclear operation here because the
4428          * loaded_vmcss_on_cpu list is still empty.
4429          */
4430         crash_enable_local_vmclear(cpu);
4431
4432         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
4433
4434         test_bits = FEATURE_CONTROL_LOCKED;
4435         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4436         if (tboot_enabled())
4437                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
4438
4439         if ((old & test_bits) != test_bits) {
4440                 /* enable and lock */
4441                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
4442         }
4443         kvm_cpu_vmxon(phys_addr);
4444         if (enable_ept)
4445                 ept_sync_global();
4446
4447         return 0;
4448 }
4449
4450 static void vmclear_local_loaded_vmcss(void)
4451 {
4452         int cpu = raw_smp_processor_id();
4453         struct loaded_vmcs *v, *n;
4454
4455         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
4456                                  loaded_vmcss_on_cpu_link)
4457                 __loaded_vmcs_clear(v);
4458 }
4459
4460
4461 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
4462  * tricks.
4463  */
4464 static void kvm_cpu_vmxoff(void)
4465 {
4466         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
4467
4468         intel_pt_handle_vmx(0);
4469         cr4_clear_bits(X86_CR4_VMXE);
4470 }
4471
4472 static void hardware_disable(void)
4473 {
4474         vmclear_local_loaded_vmcss();
4475         kvm_cpu_vmxoff();
4476 }
4477
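/*
 * Compute a control-field value from capability MSR @msr: bits in @ctl_min
 * must be supported or -EIO is returned, bits in @ctl_opt are kept only
 * where the CPU allows them, and bits the CPU requires to be 1 are added.
 */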
4478 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
4479                                       u32 msr, u32 *result)
4480 {
4481         u32 vmx_msr_low, vmx_msr_high;
4482         u32 ctl = ctl_min | ctl_opt;
4483
4484         rdmsr(msr, vmx_msr_low, vmx_msr_high);
4485
4486         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
4487         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
4488
4489         /* Ensure minimum (required) set of control bits are supported. */
4490         if (ctl_min & ~ctl)
4491                 return -EIO;
4492
4493         *result = ctl;
4494         return 0;
4495 }
4496
4497 static __init bool allow_1_setting(u32 msr, u32 ctl)
4498 {
4499         u32 vmx_msr_low, vmx_msr_high;
4500
4501         rdmsr(msr, vmx_msr_low, vmx_msr_high);
4502         return vmx_msr_high & ctl;
4503 }
4504
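/*
 * Probe the VMX capability MSRs and fill in the global VMCS configuration:
 * required and optional pin-based, processor-based (primary and secondary),
 * VM-exit and VM-entry controls, plus the VMCS revision id and size.
 */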
4505 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
4506 {
4507         u32 vmx_msr_low, vmx_msr_high;
4508         u32 min, opt, min2, opt2;
4509         u32 _pin_based_exec_control = 0;
4510         u32 _cpu_based_exec_control = 0;
4511         u32 _cpu_based_2nd_exec_control = 0;
4512         u32 _vmexit_control = 0;
4513         u32 _vmentry_control = 0;
4514
4515         memset(vmcs_conf, 0, sizeof(*vmcs_conf));
4516         min = CPU_BASED_HLT_EXITING |
4517 #ifdef CONFIG_X86_64
4518               CPU_BASED_CR8_LOAD_EXITING |
4519               CPU_BASED_CR8_STORE_EXITING |
4520 #endif
4521               CPU_BASED_CR3_LOAD_EXITING |
4522               CPU_BASED_CR3_STORE_EXITING |
4523               CPU_BASED_UNCOND_IO_EXITING |
4524               CPU_BASED_MOV_DR_EXITING |
4525               CPU_BASED_USE_TSC_OFFSETING |
4526               CPU_BASED_MWAIT_EXITING |
4527               CPU_BASED_MONITOR_EXITING |
4528               CPU_BASED_INVLPG_EXITING |
4529               CPU_BASED_RDPMC_EXITING;
4530
4531         opt = CPU_BASED_TPR_SHADOW |
4532               CPU_BASED_USE_MSR_BITMAPS |
4533               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
4534         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
4535                                 &_cpu_based_exec_control) < 0)
4536                 return -EIO;
4537 #ifdef CONFIG_X86_64
4538         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
4539                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
4540                                            ~CPU_BASED_CR8_STORE_EXITING;
4541 #endif
4542         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
4543                 min2 = 0;
4544                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
4545                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
4546                         SECONDARY_EXEC_WBINVD_EXITING |
4547                         SECONDARY_EXEC_ENABLE_VPID |
4548                         SECONDARY_EXEC_ENABLE_EPT |
4549                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
4550                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
4551                         SECONDARY_EXEC_DESC |
4552                         SECONDARY_EXEC_RDTSCP |
4553                         SECONDARY_EXEC_ENABLE_INVPCID |
4554                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
4555                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
4556                         SECONDARY_EXEC_SHADOW_VMCS |
4557                         SECONDARY_EXEC_XSAVES |
4558                         SECONDARY_EXEC_RDSEED_EXITING |
4559                         SECONDARY_EXEC_RDRAND_EXITING |
4560                         SECONDARY_EXEC_ENABLE_PML |
4561                         SECONDARY_EXEC_TSC_SCALING |
4562                         SECONDARY_EXEC_ENABLE_VMFUNC |
4563                         SECONDARY_EXEC_ENCLS_EXITING;
4564                 if (adjust_vmx_controls(min2, opt2,
4565                                         MSR_IA32_VMX_PROCBASED_CTLS2,
4566                                         &_cpu_based_2nd_exec_control) < 0)
4567                         return -EIO;
4568         }
4569 #ifndef CONFIG_X86_64
4570         if (!(_cpu_based_2nd_exec_control &
4571                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
4572                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
4573 #endif
4574
4575         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
4576                 _cpu_based_2nd_exec_control &= ~(
4577                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4578                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
4579                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4580
4581         rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
4582                 &vmx_capability.ept, &vmx_capability.vpid);
4583
4584         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
4585                 /* CR3 accesses and invlpg don't need to cause VM exits
4586                  * when EPT is enabled. */
4587                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4588                                              CPU_BASED_CR3_STORE_EXITING |
4589                                              CPU_BASED_INVLPG_EXITING);
4590         } else if (vmx_capability.ept) {
4591                 vmx_capability.ept = 0;
4592                 pr_warn_once("EPT capabilities reported even though the "
4593                                 "'enable EPT' VM-execution control cannot be set\n");
4594         }
4595         if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
4596                 vmx_capability.vpid) {
4597                 vmx_capability.vpid = 0;
4598                 pr_warn_once("VPID capabilities reported even though the "
4599                                 "'enable VPID' VM-execution control cannot be set\n");
4600         }
4601
4602         min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
4603 #ifdef CONFIG_X86_64
4604         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
4605 #endif
4606         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
4607                 VM_EXIT_CLEAR_BNDCFGS;
4608         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
4609                                 &_vmexit_control) < 0)
4610                 return -EIO;
4611
4612         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
4613         opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
4614                  PIN_BASED_VMX_PREEMPTION_TIMER;
4615         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
4616                                 &_pin_based_exec_control) < 0)
4617                 return -EIO;
4618
4619         if (cpu_has_broken_vmx_preemption_timer())
4620                 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4621         if (!(_cpu_based_2nd_exec_control &
4622                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
4623                 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
4624
4625         min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
4626         opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
4627         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
4628                                 &_vmentry_control) < 0)
4629                 return -EIO;
4630
4631         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
4632
4633         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
4634         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
4635                 return -EIO;
4636
4637 #ifdef CONFIG_X86_64
4638         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
4639         if (vmx_msr_high & (1u<<16))
4640                 return -EIO;
4641 #endif
4642
4643         /* Require Write-Back (WB) memory type for VMCS accesses. */
4644         if (((vmx_msr_high >> 18) & 15) != 6)
4645                 return -EIO;
4646
4647         vmcs_conf->size = vmx_msr_high & 0x1fff;
4648         vmcs_conf->order = get_order(vmcs_conf->size);
4649         vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
4650
4651         vmcs_conf->revision_id = vmx_msr_low;
4652
4653         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
4654         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
4655         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
4656         vmcs_conf->vmexit_ctrl         = _vmexit_control;
4657         vmcs_conf->vmentry_ctrl        = _vmentry_control;
4658
4659         if (static_branch_unlikely(&enable_evmcs))
4660                 evmcs_sanitize_exec_ctrls(vmcs_conf);
4661
4662         cpu_has_load_ia32_efer =
4663                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
4664                                 VM_ENTRY_LOAD_IA32_EFER)
4665                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
4666                                    VM_EXIT_LOAD_IA32_EFER);
4667
4668         cpu_has_load_perf_global_ctrl =
4669                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
4670                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
4671                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
4672                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
4673
4674         /*
4675          * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
4676          * but due to the errata below it can't be used. The workaround is
4677          * to use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
4678          *
4679          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
4680          *
4681          * AAK155             (model 26)
4682          * AAP115             (model 30)
4683          * AAT100             (model 37)
4684          * BC86,AAY89,BD102   (model 44)
4685          * BA97               (model 46)
4686          *
4687          */
4688         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
4689                 switch (boot_cpu_data.x86_model) {
4690                 case 26:
4691                 case 30:
4692                 case 37:
4693                 case 44:
4694                 case 46:
4695                         cpu_has_load_perf_global_ctrl = false;
4696                         printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
4697                                         "does not work properly. Using workaround\n");
4698                         break;
4699                 default:
4700                         break;
4701                 }
4702         }
4703
4704         if (boot_cpu_has(X86_FEATURE_XSAVES))
4705                 rdmsrl(MSR_IA32_XSS, host_xss);
4706
4707         return 0;
4708 }
4709
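/*
 * Allocate a VMCS (or shadow VMCS) for @cpu from its NUMA node and stamp it
 * with the appropriate revision id (the eVMCS version when enlightened VMCS
 * is in use).
 */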
4710 static struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
4711 {
4712         int node = cpu_to_node(cpu);
4713         struct page *pages;
4714         struct vmcs *vmcs;
4715
4716         pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
4717         if (!pages)
4718                 return NULL;
4719         vmcs = page_address(pages);
4720         memset(vmcs, 0, vmcs_config.size);
4721
4722         /* KVM supports Enlightened VMCS v1 only */
4723         if (static_branch_unlikely(&enable_evmcs))
4724                 vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
4725         else
4726                 vmcs->hdr.revision_id = vmcs_config.revision_id;
4727
4728         if (shadow)
4729                 vmcs->hdr.shadow_vmcs = 1;
4730         return vmcs;
4731 }
4732
4733 static void free_vmcs(struct vmcs *vmcs)
4734 {
4735         free_pages((unsigned long)vmcs, vmcs_config.order);
4736 }
4737
4738 /*
4739  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
4740  */
4741 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
4742 {
4743         if (!loaded_vmcs->vmcs)
4744                 return;
4745         loaded_vmcs_clear(loaded_vmcs);
4746         free_vmcs(loaded_vmcs->vmcs);
4747         loaded_vmcs->vmcs = NULL;
4748         if (loaded_vmcs->msr_bitmap)
4749                 free_page((unsigned long)loaded_vmcs->msr_bitmap);
4750         WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
4751 }
4752
4753 static struct vmcs *alloc_vmcs(bool shadow)
4754 {
4755         return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
4756 }
4757
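/*
 * Allocate the VMCS and, if supported, the MSR bitmap backing a loaded_vmcs,
 * initializing the bitmap to intercept all MSRs.
 */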
4758 static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
4759 {
4760         loaded_vmcs->vmcs = alloc_vmcs(false);
4761         if (!loaded_vmcs->vmcs)
4762                 return -ENOMEM;
4763
4764         loaded_vmcs->shadow_vmcs = NULL;
4765         loaded_vmcs_init(loaded_vmcs);
4766
4767         if (cpu_has_vmx_msr_bitmap()) {
4768                 loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
4769                 if (!loaded_vmcs->msr_bitmap)
4770                         goto out_vmcs;
4771                 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
4772
4773                 if (IS_ENABLED(CONFIG_HYPERV) &&
4774                     static_branch_unlikely(&enable_evmcs) &&
4775                     (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
4776                         struct hv_enlightened_vmcs *evmcs =
4777                                 (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
4778
4779                         evmcs->hv_enlightenments_control.msr_bitmap = 1;
4780                 }
4781         }
4782
4783         memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
4784
4785         return 0;
4786
4787 out_vmcs:
4788         free_loaded_vmcs(loaded_vmcs);
4789         return -ENOMEM;
4790 }
4791
4792 static void free_kvm_area(void)
4793 {
4794         int cpu;
4795
4796         for_each_possible_cpu(cpu) {
4797                 free_vmcs(per_cpu(vmxarea, cpu));
4798                 per_cpu(vmxarea, cpu) = NULL;
4799         }
4800 }
4801
4802 enum vmcs_field_width {
4803         VMCS_FIELD_WIDTH_U16 = 0,
4804         VMCS_FIELD_WIDTH_U64 = 1,
4805         VMCS_FIELD_WIDTH_U32 = 2,
4806         VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
4807 };
4808
4809 static inline int vmcs_field_width(unsigned long field)
4810 {
4811         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
4812                 return VMCS_FIELD_WIDTH_U32;
4813         return (field >> 13) & 0x3;
4814 }
4815
4816 static inline int vmcs_field_readonly(unsigned long field)
4817 {
4818         return (((field >> 10) & 0x3) == 1);
4819 }
4820
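/*
 * Prune the shadow VMCS field tables down to the fields this CPU actually
 * supports and clear their bits in the vmread (and, for read/write fields,
 * vmwrite) bitmaps so that L1 accesses to them do not cause a VM exit.
 */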
4821 static void init_vmcs_shadow_fields(void)
4822 {
4823         int i, j;
4824
4825         for (i = j = 0; i < max_shadow_read_only_fields; i++) {
4826                 u16 field = shadow_read_only_fields[i];
4827                 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
4828                     (i + 1 == max_shadow_read_only_fields ||
4829                      shadow_read_only_fields[i + 1] != field + 1))
4830                         pr_err("Missing field from shadow_read_only_field %x\n",
4831                                field + 1);
4832
4833                 clear_bit(field, vmx_vmread_bitmap);
4834 #ifdef CONFIG_X86_64
4835                 if (field & 1)
4836                         continue;
4837 #endif
4838                 if (j < i)
4839                         shadow_read_only_fields[j] = field;
4840                 j++;
4841         }
4842         max_shadow_read_only_fields = j;
4843
4844         for (i = j = 0; i < max_shadow_read_write_fields; i++) {
4845                 u16 field = shadow_read_write_fields[i];
4846                 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
4847                     (i + 1 == max_shadow_read_write_fields ||
4848                      shadow_read_write_fields[i + 1] != field + 1))
4849                         pr_err("Missing field from shadow_read_write_field %x\n",
4850                                field + 1);
4851
4852                 /*
4853                  * PML and the preemption timer can be emulated, but the
4854                  * processor cannot vmwrite to fields that don't exist
4855                  * on bare metal.
4856                  */
4857                 switch (field) {
4858                 case GUEST_PML_INDEX:
4859                         if (!cpu_has_vmx_pml())
4860                                 continue;
4861                         break;
4862                 case VMX_PREEMPTION_TIMER_VALUE:
4863                         if (!cpu_has_vmx_preemption_timer())
4864                                 continue;
4865                         break;
4866                 case GUEST_INTR_STATUS:
4867                         if (!cpu_has_vmx_apicv())
4868                                 continue;
4869                         break;
4870                 default:
4871                         break;
4872                 }
4873
4874                 clear_bit(field, vmx_vmwrite_bitmap);
4875                 clear_bit(field, vmx_vmread_bitmap);
4876 #ifdef CONFIG_X86_64
4877                 if (field & 1)
4878                         continue;
4879 #endif
4880                 if (j < i)
4881                         shadow_read_write_fields[j] = field;
4882                 j++;
4883         }
4884         max_shadow_read_write_fields = j;
4885 }
4886
4887 static __init int alloc_kvm_area(void)
4888 {
4889         int cpu;
4890
4891         for_each_possible_cpu(cpu) {
4892                 struct vmcs *vmcs;
4893
4894                 vmcs = alloc_vmcs_cpu(false, cpu);
4895                 if (!vmcs) {
4896                         free_kvm_area();
4897                         return -ENOMEM;
4898                 }
4899
4900                 /*
4901                  * When eVMCS is enabled, alloc_vmcs_cpu() sets
4902                  * vmcs->revision_id to KVM_EVMCS_VERSION instead of
4903                  * revision_id reported by MSR_IA32_VMX_BASIC.
4904                  *
4905                  * However, even though it is not explicitly documented by
4906                  * TLFS, the vmxarea passed as the VMXON argument should
4907                  * still be marked with the revision_id reported by the
4908                  * physical CPU.
4909                  */
4910                 if (static_branch_unlikely(&enable_evmcs))
4911                         vmcs->hdr.revision_id = vmcs_config.revision_id;
4912
4913                 per_cpu(vmxarea, cpu) = vmcs;
4914         }
4915         return 0;
4916 }
4917
4918 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
4919                 struct kvm_segment *save)
4920 {
4921         if (!emulate_invalid_guest_state) {
4922                 /*
4923                  * CS and SS RPL should be equal during guest entry according
4924                  * to VMX spec, but in reality it is not always so. Since vcpu
4925                  * is in the middle of the transition from real mode to
4926                  * protected mode it is safe to assume that RPL 0 is a good
4927                  * default value.
4928                  */
4929                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
4930                         save->selector &= ~SEGMENT_RPL_MASK;
4931                 save->dpl = save->selector & SEGMENT_RPL_MASK;
4932                 save->s = 1;
4933         }
4934         vmx_set_segment(vcpu, save, seg);
4935 }
4936
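/*
 * Transition the vcpu out of vm86-based real-mode emulation and restore the
 * segment state and RFLAGS that were saved when real mode was entered.
 */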
4937 static void enter_pmode(struct kvm_vcpu *vcpu)
4938 {
4939         unsigned long flags;
4940         struct vcpu_vmx *vmx = to_vmx(vcpu);
4941
4942         /*
4943          * Update real mode segment cache. It may not be up to date if a
4944          * segment register was written while the vcpu was in guest mode.
4945          */
4946         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
4947         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
4948         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
4949         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
4950         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
4951         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
4952
4953         vmx->rmode.vm86_active = 0;
4954
4955         vmx_segment_cache_clear(vmx);
4956
4957         vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
4958
4959         flags = vmcs_readl(GUEST_RFLAGS);
4960         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
4961         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
4962         vmcs_writel(GUEST_RFLAGS, flags);
4963
4964         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
4965                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
4966
4967         update_exception_bitmap(vcpu);
4968
4969         fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
4970         fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
4971         fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
4972         fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
4973         fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
4974         fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
4975 }
4976
4977 static void fix_rmode_seg(int seg, struct kvm_segment *save)
4978 {
4979         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
4980         struct kvm_segment var = *save;
4981
4982         var.dpl = 0x3;
4983         if (seg == VCPU_SREG_CS)
4984                 var.type = 0x3;
4985
4986         if (!emulate_invalid_guest_state) {
4987                 var.selector = var.base >> 4;
4988                 var.base = var.base & 0xffff0;
4989                 var.limit = 0xffff;
4990                 var.g = 0;
4991                 var.db = 0;
4992                 var.present = 1;
4993                 var.s = 1;
4994                 var.l = 0;
4995                 var.unusable = 0;
4996                 var.type = 0x3;
4997                 var.avl = 0;
4998                 if (save->base & 0xf)
4999                         printk_once(KERN_WARNING "kvm: segment base is not "
5000                                         "paragraph aligned when entering "
5001                                         "protected mode (seg=%d)", seg);
5002         }
5003
5004         vmcs_write16(sf->selector, var.selector);
5005         vmcs_writel(sf->base, var.base);
5006         vmcs_write32(sf->limit, var.limit);
5007         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
5008 }
5009
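/*
 * Enter vm86-based real-mode emulation: save the current segment state and
 * RFLAGS, point TR at the dummy TSS set up via KVM_SET_TSS_ADDR and rewrite
 * the data segments into vm86-compatible form.
 */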
5010 static void enter_rmode(struct kvm_vcpu *vcpu)
5011 {
5012         unsigned long flags;
5013         struct vcpu_vmx *vmx = to_vmx(vcpu);
5014         struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
5015
5016         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
5017         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
5018         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
5019         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
5020         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
5021         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
5022         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
5023
5024         vmx->rmode.vm86_active = 1;
5025
5026         /*
5027          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
5028          * vcpu. Warn the user that an update is overdue.
5029          */
5030         if (!kvm_vmx->tss_addr)
5031                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
5032                              "called before entering vcpu\n");
5033
5034         vmx_segment_cache_clear(vmx);
5035
5036         vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
5037         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
5038         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
5039
5040         flags = vmcs_readl(GUEST_RFLAGS);
5041         vmx->rmode.save_rflags = flags;
5042
5043         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
5044
5045         vmcs_writel(GUEST_RFLAGS, flags);
5046         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
5047         update_exception_bitmap(vcpu);
5048
5049         fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
5050         fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
5051         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
5052         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
5053         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
5054         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
5055
5056         kvm_mmu_reset_context(vcpu);
5057 }
5058
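/*
 * Write the guest's EFER: toggle the "IA-32e mode guest" VM-entry control to
 * match EFER.LMA and update the shared-MSR copy of EFER (clearing LME when
 * the guest is not in long mode).
 */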
5059 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
5060 {
5061         struct vcpu_vmx *vmx = to_vmx(vcpu);
5062         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
5063
5064         if (!msr)
5065                 return;
5066
5067         vcpu->arch.efer = efer;
5068         if (efer & EFER_LMA) {
5069                 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
5070                 msr->data = efer;
5071         } else {
5072                 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
5073
5074                 msr->data = efer & ~EFER_LME;
5075         }
5076         setup_msrs(vmx);
5077 }
5078
5079 #ifdef CONFIG_X86_64
5080
5081 static void enter_lmode(struct kvm_vcpu *vcpu)
5082 {
5083         u32 guest_tr_ar;
5084
5085         vmx_segment_cache_clear(to_vmx(vcpu));
5086
5087         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
5088         if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
5089                 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
5090                                      __func__);
5091                 vmcs_write32(GUEST_TR_AR_BYTES,
5092                              (guest_tr_ar & ~VMX_AR_TYPE_MASK)
5093                              | VMX_AR_TYPE_BUSY_64_TSS);
5094         }
5095         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
5096 }
5097
5098 static void exit_lmode(struct kvm_vcpu *vcpu)
5099 {
5100         vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
5101         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
5102 }
5103
5104 #endif
5105
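/*
 * Flush guest TLB entries: use an EPT context invalidation when EPT is
 * enabled and guest-physical mappings must be flushed (or VPID is not in
 * use); otherwise a VPID-tagged flush is sufficient.
 */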
5106 static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
5107                                 bool invalidate_gpa)
5108 {
5109         if (enable_ept && (invalidate_gpa || !enable_vpid)) {
5110                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
5111                         return;
5112                 ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
5113         } else {
5114                 vpid_sync_context(vpid);
5115         }
5116 }
5117
5118 static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
5119 {
5120         __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
5121 }
5122
5123 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
5124 {
5125         int vpid = to_vmx(vcpu)->vpid;
5126
5127         if (!vpid_sync_vcpu_addr(vpid, addr))
5128                 vpid_sync_context(vpid);
5129
5130         /*
5131          * If VPIDs are not supported or enabled, then the above is a no-op.
5132          * But we don't really need a TLB flush in that case anyway, because
5133          * each VM entry/exit includes an implicit flush when VPID is 0.
5134          */
5135 }
5136
5137 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
5138 {
5139         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
5140
5141         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
5142         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
5143 }
5144
5145 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
5146 {
5147         if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
5148                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
5149         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
5150 }
5151
5152 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
5153 {
5154         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
5155
5156         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
5157         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
5158 }
5159
5160 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
5161 {
5162         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
5163
5164         if (!test_bit(VCPU_EXREG_PDPTR,
5165                       (unsigned long *)&vcpu->arch.regs_dirty))
5166                 return;
5167
5168         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
5169                 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
5170                 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
5171                 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
5172                 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
5173         }
5174 }
5175
5176 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
5177 {
5178         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
5179
5180         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
5181                 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
5182                 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
5183                 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
5184                 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
5185         }
5186
5187         __set_bit(VCPU_EXREG_PDPTR,
5188                   (unsigned long *)&vcpu->arch.regs_avail);
5189         __set_bit(VCPU_EXREG_PDPTR,
5190                   (unsigned long *)&vcpu->arch.regs_dirty);
5191 }
5192
5193 static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
5194 {
5195         u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
5196         u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
5197         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5198
5199         if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
5200                 SECONDARY_EXEC_UNRESTRICTED_GUEST &&
5201             nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
5202                 fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
5203
5204         return fixed_bits_valid(val, fixed0, fixed1);
5205 }
5206
5207 static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
5208 {
5209         u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
5210         u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
5211
5212         return fixed_bits_valid(val, fixed0, fixed1);
5213 }
5214
5215 static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
5216 {
5217         u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
5218         u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
5219
5220         return fixed_bits_valid(val, fixed0, fixed1);
5221 }
5222
5223 /* No difference in the restrictions on guest and host CR4 in VMX operation. */
5224 #define nested_guest_cr4_valid  nested_cr4_valid
5225 #define nested_host_cr4_valid   nested_cr4_valid
5226
5227 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
5228
5229 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
5230                                         unsigned long cr0,
5231                                         struct kvm_vcpu *vcpu)
5232 {
5233         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
5234                 vmx_decache_cr3(vcpu);
5235         if (!(cr0 & X86_CR0_PG)) {
5236                 /* From paging/starting to nonpaging */
5237                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
5238                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
5239                              (CPU_BASED_CR3_LOAD_EXITING |
5240                               CPU_BASED_CR3_STORE_EXITING));
5241                 vcpu->arch.cr0 = cr0;
5242                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
5243         } else if (!is_paging(vcpu)) {
5244                 /* From nonpaging to paging */
5245                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
5246                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
5247                              ~(CPU_BASED_CR3_LOAD_EXITING |
5248                                CPU_BASED_CR3_STORE_EXITING));
5249                 vcpu->arch.cr0 = cr0;
5250                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
5251         }
5252
5253         if (!(cr0 & X86_CR0_WP))
5254                 *hw_cr0 &= ~X86_CR0_WP;
5255 }
5256
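/*
 * Update guest CR0: compute the hardware CR0 (forcing the bits KVM always
 * needs set or cleared), switch between real-mode emulation, protected mode
 * and long mode as needed, and refresh the CR0 read shadow.
 */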
5257 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
5258 {
5259         struct vcpu_vmx *vmx = to_vmx(vcpu);
5260         unsigned long hw_cr0;
5261
5262         hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
5263         if (enable_unrestricted_guest)
5264                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
5265         else {
5266                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
5267
5268                 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
5269                         enter_pmode(vcpu);
5270
5271                 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
5272                         enter_rmode(vcpu);
5273         }
5274
5275 #ifdef CONFIG_X86_64
5276         if (vcpu->arch.efer & EFER_LME) {
5277                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
5278                         enter_lmode(vcpu);
5279                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
5280                         exit_lmode(vcpu);
5281         }
5282 #endif
5283
5284         if (enable_ept && !enable_unrestricted_guest)
5285                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
5286
5287         vmcs_writel(CR0_READ_SHADOW, cr0);
5288         vmcs_writel(GUEST_CR0, hw_cr0);
5289         vcpu->arch.cr0 = cr0;
5290
5291         /* depends on vcpu->arch.cr0 to be set to a new value */
5292         vmx->emulation_required = emulation_required(vcpu);
5293 }
5294
5295 static int get_ept_level(struct kvm_vcpu *vcpu)
5296 {
5297         if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
5298                 return 5;
5299         return 4;
5300 }
5301
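/*
 * Build an EPT pointer for @root_hpa: WB memory type, 4- or 5-level page
 * walk and, when usable, the accessed/dirty flag enable bit.
 */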
5302 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
5303 {
5304         u64 eptp = VMX_EPTP_MT_WB;
5305
5306         eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
5307
5308         if (enable_ept_ad_bits &&
5309             (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
5310                 eptp |= VMX_EPTP_AD_ENABLE_BIT;
5311         eptp |= (root_hpa & PAGE_MASK);
5312
5313         return eptp;
5314 }
5315
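/*
 * Load a new guest CR3.  With EPT enabled this updates the EPT pointer
 * (tracking it for Hyper-V remote TLB flush) and keeps GUEST_CR3 pointing at
 * either the guest's CR3 or the identity map for non-paged real mode.
 */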
5316 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
5317 {
5318         struct kvm *kvm = vcpu->kvm;
5319         unsigned long guest_cr3;
5320         u64 eptp;
5321
5322         guest_cr3 = cr3;
5323         if (enable_ept) {
5324                 eptp = construct_eptp(vcpu, cr3);
5325                 vmcs_write64(EPT_POINTER, eptp);
5326
5327                 if (kvm_x86_ops->tlb_remote_flush) {
5328                         spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
5329                         to_vmx(vcpu)->ept_pointer = eptp;
5330                         to_kvm_vmx(kvm)->ept_pointers_match
5331                                 = EPT_POINTERS_CHECK;
5332                         spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
5333                 }
5334
5335                 if (enable_unrestricted_guest || is_paging(vcpu) ||
5336                     is_guest_mode(vcpu))
5337                         guest_cr3 = kvm_read_cr3(vcpu);
5338                 else
5339                         guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
5340                 ept_load_pdptrs(vcpu);
5341         }
5342
5343         vmcs_writel(GUEST_CR3, guest_cr3);
5344 }
5345
5346 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
5347 {
5348         /*
5349          * Pass through host's Machine Check Enable value to hw_cr4, which
5350          * is in force while we are in guest mode.  Do not let guests control
5351          * this bit, even if host CR4.MCE == 0.
5352          */
5353         unsigned long hw_cr4;
5354
5355         hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
5356         if (enable_unrestricted_guest)
5357                 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
5358         else if (to_vmx(vcpu)->rmode.vm86_active)
5359                 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
5360         else
5361                 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
5362
5363         if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
5364                 if (cr4 & X86_CR4_UMIP) {
5365                         vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
5366                                 SECONDARY_EXEC_DESC);
5367                         hw_cr4 &= ~X86_CR4_UMIP;
5368                 } else if (!is_guest_mode(vcpu) ||
5369                         !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
5370                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
5371                                         SECONDARY_EXEC_DESC);
5372         }
5373
5374         if (cr4 & X86_CR4_VMXE) {
5375                 /*
5376                  * To use VMXON (and later other VMX instructions), a guest
5377                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
5378                  * So basically the check on whether to allow nested VMX
5379                  * is here.  We operate under the default treatment of SMM,
5380                  * so VMX cannot be enabled under SMM.
5381                  */
5382                 if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
5383                         return 1;
5384         }
5385
5386         if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
5387                 return 1;
5388
5389         vcpu->arch.cr4 = cr4;
5390
5391         if (!enable_unrestricted_guest) {
5392                 if (enable_ept) {
5393                         if (!is_paging(vcpu)) {
5394                                 hw_cr4 &= ~X86_CR4_PAE;
5395                                 hw_cr4 |= X86_CR4_PSE;
5396                         } else if (!(cr4 & X86_CR4_PAE)) {
5397                                 hw_cr4 &= ~X86_CR4_PAE;
5398                         }
5399                 }
5400
5401                 /*
5402                  * SMEP/SMAP/PKU is disabled if the CPU is in non-paging mode
5403                  * in hardware.  To emulate this behavior, SMEP/SMAP/PKU needs
5404                  * to be manually disabled when the guest switches to non-paging
5405                  * mode.
5406                  *
5407                  * If !enable_unrestricted_guest, the CPU is always running
5408                  * with CR0.PG=1 and CR4 needs to be modified.
5409                  * If enable_unrestricted_guest, the CPU automatically
5410                  * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
5411                  */
5412                 if (!is_paging(vcpu))
5413                         hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
5414         }
5415
5416         vmcs_writel(CR4_READ_SHADOW, cr4);
5417         vmcs_writel(GUEST_CR4, hw_cr4);
5418         return 0;
5419 }
5420
5421 static void vmx_get_segment(struct kvm_vcpu *vcpu,
5422                             struct kvm_segment *var, int seg)
5423 {
5424         struct vcpu_vmx *vmx = to_vmx(vcpu);
5425         u32 ar;
5426
5427         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
5428                 *var = vmx->rmode.segs[seg];
5429                 if (seg == VCPU_SREG_TR
5430                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
5431                         return;
5432                 var->base = vmx_read_guest_seg_base(vmx, seg);
5433                 var->selector = vmx_read_guest_seg_selector(vmx, seg);
5434                 return;
5435         }
5436         var->base = vmx_read_guest_seg_base(vmx, seg);
5437         var->limit = vmx_read_guest_seg_limit(vmx, seg);
5438         var->selector = vmx_read_guest_seg_selector(vmx, seg);
5439         ar = vmx_read_guest_seg_ar(vmx, seg);
5440         var->unusable = (ar >> 16) & 1;
5441         var->type = ar & 15;
5442         var->s = (ar >> 4) & 1;
5443         var->dpl = (ar >> 5) & 3;
5444         /*
5445          * Some userspaces do not preserve the unusable property. Since a
5446          * usable segment has to be present according to the VMX spec, we can
5447          * work around the userspace bug by making an unusable segment always
5448          * nonpresent. vmx_segment_access_rights() already marks a nonpresent
5449          * segment as unusable.
5450          */
5451         var->present = !var->unusable;
5452         var->avl = (ar >> 12) & 1;
5453         var->l = (ar >> 13) & 1;
5454         var->db = (ar >> 14) & 1;
5455         var->g = (ar >> 15) & 1;
5456 }
5457
5458 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
5459 {
5460         struct kvm_segment s;
5461
5462         if (to_vmx(vcpu)->rmode.vm86_active) {
5463                 vmx_get_segment(vcpu, &s, seg);
5464                 return s.base;
5465         }
5466         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
5467 }
5468
5469 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
5470 {
5471         struct vcpu_vmx *vmx = to_vmx(vcpu);
5472
5473         if (unlikely(vmx->rmode.vm86_active))
5474                 return 0;
5475         else {
5476                 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
5477                 return VMX_AR_DPL(ar);
5478         }
5479 }
5480
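/*
 * Pack a struct kvm_segment into the VMX access-rights (AR bytes) format,
 * marking unusable or non-present segments with the "unusable" bit.
 */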
5481 static u32 vmx_segment_access_rights(struct kvm_segment *var)
5482 {
5483         u32 ar;
5484
5485         if (var->unusable || !var->present)
5486                 ar = 1 << 16;
5487         else {
5488                 ar = var->type & 15;
5489                 ar |= (var->s & 1) << 4;
5490                 ar |= (var->dpl & 3) << 5;
5491                 ar |= (var->present & 1) << 7;
5492                 ar |= (var->avl & 1) << 12;
5493                 ar |= (var->l & 1) << 13;
5494                 ar |= (var->db & 1) << 14;
5495                 ar |= (var->g & 1) << 15;
5496         }
5497
5498         return ar;
5499 }
5500
5501 static void vmx_set_segment(struct kvm_vcpu *vcpu,
5502                             struct kvm_segment *var, int seg)
5503 {
5504         struct vcpu_vmx *vmx = to_vmx(vcpu);
5505         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
5506
5507         vmx_segment_cache_clear(vmx);
5508
5509         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
5510                 vmx->rmode.segs[seg] = *var;
5511                 if (seg == VCPU_SREG_TR)
5512                         vmcs_write16(sf->selector, var->selector);
5513                 else if (var->s)
5514                         fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
5515                 goto out;
5516         }
5517
5518         vmcs_writel(sf->base, var->base);
5519         vmcs_write32(sf->limit, var->limit);
5520         vmcs_write16(sf->selector, var->selector);
5521
5522         /*
5523          * Fix the "Accessed" bit in the AR field of segment registers for
5524          * older qemu binaries.
5525          * The IA-32 architecture specifies that at processor reset the
5526          * "Accessed" bit in the AR field of segment registers is 1, but qemu
5527          * sets it to 0 in its userland code. This causes an invalid guest
5528          * state vmexit when "unrestricted guest" mode is turned on.
5529          * A fix for this setup issue in cpu_reset has been pushed to the
5530          * qemu tree; newer qemu binaries with that fix no longer need this
5531          * kvm hack.
5532          */
5533         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
5534                 var->type |= 0x1; /* Accessed */
5535
5536         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
5537
5538 out:
5539         vmx->emulation_required = emulation_required(vcpu);
5540 }
5541
5542 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5543 {
5544         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
5545
5546         *db = (ar >> 14) & 1;
5547         *l = (ar >> 13) & 1;
5548 }
5549
5550 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5551 {
5552         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
5553         dt->address = vmcs_readl(GUEST_IDTR_BASE);
5554 }
5555
5556 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5557 {
5558         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
5559         vmcs_writel(GUEST_IDTR_BASE, dt->address);
5560 }
5561
5562 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5563 {
5564         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
5565         dt->address = vmcs_readl(GUEST_GDTR_BASE);
5566 }
5567
5568 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
5569 {
5570         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
5571         vmcs_writel(GUEST_GDTR_BASE, dt->address);
5572 }
5573
5574 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
5575 {
5576         struct kvm_segment var;
5577         u32 ar;
5578
5579         vmx_get_segment(vcpu, &var, seg);
5580         var.dpl = 0x3;
5581         if (seg == VCPU_SREG_CS)
5582                 var.type = 0x3;
5583         ar = vmx_segment_access_rights(&var);
5584
5585         if (var.base != (var.selector << 4))
5586                 return false;
5587         if (var.limit != 0xffff)
5588                 return false;
5589         if (ar != 0xf3)
5590                 return false;
5591
5592         return true;
5593 }
5594
5595 static bool code_segment_valid(struct kvm_vcpu *vcpu)
5596 {
5597         struct kvm_segment cs;
5598         unsigned int cs_rpl;
5599
5600         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5601         cs_rpl = cs.selector & SEGMENT_RPL_MASK;
5602
5603         if (cs.unusable)
5604                 return false;
5605         if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
5606                 return false;
5607         if (!cs.s)
5608                 return false;
5609         if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
5610                 if (cs.dpl > cs_rpl)
5611                         return false;
5612         } else {
5613                 if (cs.dpl != cs_rpl)
5614                         return false;
5615         }
5616         if (!cs.present)
5617                 return false;
5618
5619         /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
5620         return true;
5621 }
5622
5623 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
5624 {
5625         struct kvm_segment ss;
5626         unsigned int ss_rpl;
5627
5628         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
5629         ss_rpl = ss.selector & SEGMENT_RPL_MASK;
5630
5631         if (ss.unusable)
5632                 return true;
5633         if (ss.type != 3 && ss.type != 7)
5634                 return false;
5635         if (!ss.s)
5636                 return false;
5637         if (ss.dpl != ss_rpl) /* DPL != RPL */
5638                 return false;
5639         if (!ss.present)
5640                 return false;
5641
5642         return true;
5643 }
5644
5645 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
5646 {
5647         struct kvm_segment var;
5648         unsigned int rpl;
5649
5650         vmx_get_segment(vcpu, &var, seg);
5651         rpl = var.selector & SEGMENT_RPL_MASK;
5652
5653         if (var.unusable)
5654                 return true;
5655         if (!var.s)
5656                 return false;
5657         if (!var.present)
5658                 return false;
5659         if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
5660                 if (var.dpl < rpl) /* DPL < RPL */
5661                         return false;
5662         }
5663
5664         /* TODO: Add other members to kvm_segment_field to allow checking for other access
5665          * rights flags
5666          */
5667         return true;
5668 }
5669
5670 static bool tr_valid(struct kvm_vcpu *vcpu)
5671 {
5672         struct kvm_segment tr;
5673
5674         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
5675
5676         if (tr.unusable)
5677                 return false;
5678         if (tr.selector & SEGMENT_TI_MASK)      /* TI = 1 */
5679                 return false;
5680         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
5681                 return false;
5682         if (!tr.present)
5683                 return false;
5684
5685         return true;
5686 }
5687
5688 static bool ldtr_valid(struct kvm_vcpu *vcpu)
5689 {
5690         struct kvm_segment ldtr;
5691
5692         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
5693
5694         if (ldtr.unusable)
5695                 return true;
5696         if (ldtr.selector & SEGMENT_TI_MASK)    /* TI = 1 */
5697                 return false;
5698         if (ldtr.type != 2)
5699                 return false;
5700         if (!ldtr.present)
5701                 return false;
5702
5703         return true;
5704 }
5705
5706 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
5707 {
5708         struct kvm_segment cs, ss;
5709
5710         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5711         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
5712
5713         return ((cs.selector & SEGMENT_RPL_MASK) ==
5714                  (ss.selector & SEGMENT_RPL_MASK));
5715 }
5716
5717 /*
5718  * Check if guest state is valid. Returns true if valid, false if
5719  * not.
5720  * We assume that registers are always usable.
5721  */
5722 static bool guest_state_valid(struct kvm_vcpu *vcpu)
5723 {
5724         if (enable_unrestricted_guest)
5725                 return true;
5726
5727         /* real mode guest state checks */
5728         if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
5729                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
5730                         return false;
5731                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
5732                         return false;
5733                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
5734                         return false;
5735                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
5736                         return false;
5737                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
5738                         return false;
5739                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
5740                         return false;
5741         } else {
5742         /* protected mode guest state checks */
5743                 if (!cs_ss_rpl_check(vcpu))
5744                         return false;
5745                 if (!code_segment_valid(vcpu))
5746                         return false;
5747                 if (!stack_segment_valid(vcpu))
5748                         return false;
5749                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
5750                         return false;
5751                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
5752                         return false;
5753                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
5754                         return false;
5755                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
5756                         return false;
5757                 if (!tr_valid(vcpu))
5758                         return false;
5759                 if (!ldtr_valid(vcpu))
5760                         return false;
5761         }
5762         /* TODO:
5763          * - Add checks on RIP
5764          * - Add checks on RFLAGS
5765          */
5766
5767         return true;
5768 }
5769
5770 static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
5771 {
5772         return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
5773 }
5774
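/*
 * Set up the three-page TSS used when emulating real mode without
 * unrestricted-guest support: clear the pages at tss_addr, point the I/O
 * bitmap base just past the base TSS plus the interrupt redirection bitmap,
 * and write the trailing 0xff byte that conventionally terminates the I/O
 * permission bitmap.
 */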
5775 static int init_rmode_tss(struct kvm *kvm)
5776 {
5777         gfn_t fn;
5778         u16 data = 0;
5779         int idx, r;
5780
5781         idx = srcu_read_lock(&kvm->srcu);
5782         fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
5783         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
5784         if (r < 0)
5785                 goto out;
5786         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
5787         r = kvm_write_guest_page(kvm, fn++, &data,
5788                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
5789         if (r < 0)
5790                 goto out;
5791         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
5792         if (r < 0)
5793                 goto out;
5794         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
5795         if (r < 0)
5796                 goto out;
5797         data = ~0;
5798         r = kvm_write_guest_page(kvm, fn, &data,
5799                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
5800                                  sizeof(u8));
5801 out:
5802         srcu_read_unlock(&kvm->srcu, idx);
5803         return r;
5804 }
5805
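/*
 * Build the identity-mapped page table used for real-mode guests when EPT
 * is enabled: one page of 1024 4-MByte PSE entries, where entry i maps
 * guest-physical address i * 4MB onto itself with RW/USER permissions.
 */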
5806 static int init_rmode_identity_map(struct kvm *kvm)
5807 {
5808         struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
5809         int i, idx, r = 0;
5810         kvm_pfn_t identity_map_pfn;
5811         u32 tmp;
5812
5813         /* Protect kvm_vmx->ept_identity_pagetable_done. */
5814         mutex_lock(&kvm->slots_lock);
5815
5816         if (likely(kvm_vmx->ept_identity_pagetable_done))
5817                 goto out2;
5818
5819         if (!kvm_vmx->ept_identity_map_addr)
5820                 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
5821         identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
5822
5823         r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
5824                                     kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
5825         if (r < 0)
5826                 goto out2;
5827
5828         idx = srcu_read_lock(&kvm->srcu);
5829         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
5830         if (r < 0)
5831                 goto out;
5832         /* Set up identity-mapping pagetable for EPT in real mode */
5833         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
5834                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
5835                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
5836                 r = kvm_write_guest_page(kvm, identity_map_pfn,
5837                                 &tmp, i * sizeof(tmp), sizeof(tmp));
5838                 if (r < 0)
5839                         goto out;
5840         }
5841         kvm_vmx->ept_identity_pagetable_done = true;
5842
5843 out:
5844         srcu_read_unlock(&kvm->srcu, idx);
5845
5846 out2:
5847         mutex_unlock(&kvm->slots_lock);
5848         return r;
5849 }
5850
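/*
 * Reset a guest segment register to a real-mode-style flat segment:
 * selector 0, base 0, limit 0xffff, access rights 0x93 (present, accessed,
 * read/write data); CS additionally gets the code-segment type bit, giving
 * 0x9b.
 */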
5851 static void seg_setup(int seg)
5852 {
5853         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
5854         unsigned int ar;
5855
5856         vmcs_write16(sf->selector, 0);
5857         vmcs_writel(sf->base, 0);
5858         vmcs_write32(sf->limit, 0xffff);
5859         ar = 0x93;
5860         if (seg == VCPU_SREG_CS)
5861                 ar |= 0x08; /* code segment */
5862
5863         vmcs_write32(sf->ar_bytes, ar);
5864 }
5865
5866 static int alloc_apic_access_page(struct kvm *kvm)
5867 {
5868         struct page *page;
5869         int r = 0;
5870
5871         mutex_lock(&kvm->slots_lock);
5872         if (kvm->arch.apic_access_page_done)
5873                 goto out;
5874         r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
5875                                     APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
5876         if (r)
5877                 goto out;
5878
5879         page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
5880         if (is_error_page(page)) {
5881                 r = -EFAULT;
5882                 goto out;
5883         }
5884
5885         /*
5886          * Do not pin the page in memory, so that memory hot-unplug
5887          * is able to migrate it.
5888          */
5889         put_page(page);
5890         kvm->arch.apic_access_page_done = true;
5891 out:
5892         mutex_unlock(&kvm->slots_lock);
5893         return r;
5894 }
5895
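/*
 * VPIDs are handed out from a global bitmap shared by all VMs. VPID 0 is
 * reserved (hardware uses it for VMX root operation) and doubles as the
 * "no VPID" value when VPIDs are disabled or none are free.
 */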
5896 static int allocate_vpid(void)
5897 {
5898         int vpid;
5899
5900         if (!enable_vpid)
5901                 return 0;
5902         spin_lock(&vmx_vpid_lock);
5903         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
5904         if (vpid < VMX_NR_VPIDS)
5905                 __set_bit(vpid, vmx_vpid_bitmap);
5906         else
5907                 vpid = 0;
5908         spin_unlock(&vmx_vpid_lock);
5909         return vpid;
5910 }
5911
5912 static void free_vpid(int vpid)
5913 {
5914         if (!enable_vpid || vpid == 0)
5915                 return;
5916         spin_lock(&vmx_vpid_lock);
5917         __clear_bit(vpid, vmx_vpid_bitmap);
5918         spin_unlock(&vmx_vpid_lock);
5919 }
5920
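/*
 * The MSR bitmap is a single 4-KByte page split into four 1-KByte regions:
 * read intercepts for MSRs 0x00000000-0x00001fff at offset 0x000, read
 * intercepts for MSRs 0xc0000000-0xc0001fff at 0x400, and the corresponding
 * write intercepts at 0x800 and 0xc00. Each MSR is one bit, indexed by the
 * low 13 bits of its number: for example, MSR_IA32_SYSENTER_CS (0x174) uses
 * bit 0x174 of the regions at 0x000/0x800, while MSR_EFER (0xc0000080) uses
 * bit 0x80 of the regions at 0x400/0xc00. A clear bit means no VM exit for
 * that access.
 */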
5921 static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
5922                                                           u32 msr, int type)
5923 {
5924         int f = sizeof(unsigned long);
5925
5926         if (!cpu_has_vmx_msr_bitmap())
5927                 return;
5928
5929         if (static_branch_unlikely(&enable_evmcs))
5930                 evmcs_touch_msr_bitmap();
5931
5932         /*
5933          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5934          * have the write-low and read-high bitmap offsets the wrong way round.
5935          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5936          */
5937         if (msr <= 0x1fff) {
5938                 if (type & MSR_TYPE_R)
5939                         /* read-low */
5940                         __clear_bit(msr, msr_bitmap + 0x000 / f);
5941
5942                 if (type & MSR_TYPE_W)
5943                         /* write-low */
5944                         __clear_bit(msr, msr_bitmap + 0x800 / f);
5945
5946         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5947                 msr &= 0x1fff;
5948                 if (type & MSR_TYPE_R)
5949                         /* read-high */
5950                         __clear_bit(msr, msr_bitmap + 0x400 / f);
5951
5952                 if (type & MSR_TYPE_W)
5953                         /* write-high */
5954                         __clear_bit(msr, msr_bitmap + 0xc00 / f);
5955
5956         }
5957 }
5958
5959 static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
5960                                                          u32 msr, int type)
5961 {
5962         int f = sizeof(unsigned long);
5963
5964         if (!cpu_has_vmx_msr_bitmap())
5965                 return;
5966
5967         if (static_branch_unlikely(&enable_evmcs))
5968                 evmcs_touch_msr_bitmap();
5969
5970         /*
5971          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5972          * have the write-low and read-high bitmap offsets the wrong way round.
5973          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5974          */
5975         if (msr <= 0x1fff) {
5976                 if (type & MSR_TYPE_R)
5977                         /* read-low */
5978                         __set_bit(msr, msr_bitmap + 0x000 / f);
5979
5980                 if (type & MSR_TYPE_W)
5981                         /* write-low */
5982                         __set_bit(msr, msr_bitmap + 0x800 / f);
5983
5984         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5985                 msr &= 0x1fff;
5986                 if (type & MSR_TYPE_R)
5987                         /* read-high */
5988                         __set_bit(msr, msr_bitmap + 0x400 / f);
5989
5990                 if (type & MSR_TYPE_W)
5991                         /* write-high */
5992                         __set_bit(msr, msr_bitmap + 0xc00 / f);
5993
5994         }
5995 }
5996
5997 static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
5998                                                       u32 msr, int type, bool value)
5999 {
6000         if (value)
6001                 vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
6002         else
6003                 vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
6004 }
6005
6006 /*
6007  * If an MSR is allowed by L0, we should check whether it is allowed by L1.
6008  * The corresponding intercept bit is cleared only when both L0 and L1 allow it.
6009  */
6010 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
6011                                                unsigned long *msr_bitmap_nested,
6012                                                u32 msr, int type)
6013 {
6014         int f = sizeof(unsigned long);
6015
6016         /*
6017          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
6018          * have the write-low and read-high bitmap offsets the wrong way round.
6019          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
6020          */
6021         if (msr <= 0x1fff) {
6022                 if (type & MSR_TYPE_R &&
6023                    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
6024                         /* read-low */
6025                         __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
6026
6027                 if (type & MSR_TYPE_W &&
6028                    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
6029                         /* write-low */
6030                         __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
6031
6032         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
6033                 msr &= 0x1fff;
6034                 if (type & MSR_TYPE_R &&
6035                    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
6036                         /* read-high */
6037                         __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
6038
6039                 if (type & MSR_TYPE_W &&
6040                    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
6041                         /* write-high */
6042                         __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
6043
6044         }
6045 }
6046
6047 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
6048 {
6049         u8 mode = 0;
6050
6051         if (cpu_has_secondary_exec_ctrls() &&
6052             (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
6053              SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
6054                 mode |= MSR_BITMAP_MODE_X2APIC;
6055                 if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
6056                         mode |= MSR_BITMAP_MODE_X2APIC_APICV;
6057         }
6058
6059         return mode;
6060 }
6061
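/*
 * Map an xAPIC MMIO register offset to its x2APIC MSR number: the MSRs
 * start at APIC_BASE_MSR (0x800) and each 16-byte xAPIC register maps to
 * one MSR, so e.g. X2APIC_MSR(APIC_TASKPRI) is 0x800 + (0x80 >> 4) = 0x808.
 */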
6062 #define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
6063
6064 static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
6065                                          u8 mode)
6066 {
6067         int msr;
6068
6069         for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
6070                 unsigned word = msr / BITS_PER_LONG;
6071                 msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
6072                 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
6073         }
6074
6075         if (mode & MSR_BITMAP_MODE_X2APIC) {
6076                 /*
6077                  * TPR reads and writes can be virtualized even if virtual interrupt
6078                  * delivery is not in use.
6079                  */
6080                 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
6081                 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
6082                         vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
6083                         vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
6084                         vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
6085                 }
6086         }
6087 }
6088
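/*
 * Refresh the vmcs01 MSR bitmap when the computed x2APIC/APICv mode
 * changes; only the x2APIC MSR range is rewritten.
 */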
6089 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
6090 {
6091         struct vcpu_vmx *vmx = to_vmx(vcpu);
6092         unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
6093         u8 mode = vmx_msr_bitmap_mode(vcpu);
6094         u8 changed = mode ^ vmx->msr_bitmap_mode;
6095
6096         if (!changed)
6097                 return;
6098
6099         if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
6100                 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
6101
6102         vmx->msr_bitmap_mode = mode;
6103 }
6104
6105 static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
6106 {
6107         return enable_apicv;
6108 }
6109
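/*
 * Mark the vmcs12 pages that the CPU may have written to on L1's behalf
 * (the virtual-APIC page and the posted-interrupt descriptor) as dirty, so
 * that dirty logging and migration observe those updates.
 */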
6110 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
6111 {
6112         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6113         gfn_t gfn;
6114
6115         /*
6116          * Don't need to mark the APIC access page dirty; it is never
6117          * written to by the CPU during APIC virtualization.
6118          */
6119
6120         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
6121                 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
6122                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
6123         }
6124
6125         if (nested_cpu_has_posted_intr(vmcs12)) {
6126                 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
6127                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
6128         }
6129 }
6130
6131
6132 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
6133 {
6134         struct vcpu_vmx *vmx = to_vmx(vcpu);
6135         int max_irr;
6136         void *vapic_page;
6137         u16 status;
6138
6139         if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
6140                 return;
6141
6142         vmx->nested.pi_pending = false;
6143         if (!pi_test_and_clear_on(vmx->nested.pi_desc))
6144                 return;
6145
6146         max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
6147         if (max_irr != 256) {
6148                 vapic_page = kmap(vmx->nested.virtual_apic_page);
6149                 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
6150                         vapic_page, &max_irr);
6151                 kunmap(vmx->nested.virtual_apic_page);
6152
6153                 status = vmcs_read16(GUEST_INTR_STATUS);
6154                 if ((u8)max_irr > ((u8)status & 0xff)) {
6155                         status &= ~0xff;
6156                         status |= (u8)max_irr;
6157                         vmcs_write16(GUEST_INTR_STATUS, status);
6158                 }
6159         }
6160
6161         nested_mark_vmcs12_pages_dirty(vcpu);
6162 }
6163
6164 static u8 vmx_get_rvi(void)
6165 {
6166         return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
6167 }
6168
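/*
 * With nested virtual interrupt delivery, decide whether L2 has a pending
 * virtual interrupt that could actually be delivered: compare the RVI from
 * GUEST_INTR_STATUS against the PPR in L2's virtual-APIC page; only an
 * interrupt whose priority class exceeds the PPR's is deliverable.
 */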
6169 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
6170 {
6171         struct vcpu_vmx *vmx = to_vmx(vcpu);
6172         void *vapic_page;
6173         u32 vppr;
6174         int rvi;
6175
6176         if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
6177                 !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
6178                 WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
6179                 return false;
6180
6181         rvi = vmx_get_rvi();
6182
6183         vapic_page = kmap(vmx->nested.virtual_apic_page);
6184         vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
6185         kunmap(vmx->nested.virtual_apic_page);
6186
6187         return ((rvi & 0xf0) > (vppr & 0xf0));
6188 }
6189
6190 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
6191                                                      bool nested)
6192 {
6193 #ifdef CONFIG_SMP
6194         int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
6195
6196         if (vcpu->mode == IN_GUEST_MODE) {
6197                 /*
6198                  * The vector of the interrupt to be delivered to the vcpu
6199                  * has already been set in the PIR before this function runs.
6200                  *
6201                  * One of the following cases is reached in this block, and
6202                  * a notification event is sent in all of them, as explained
6203                  * below.
6204                  *
6205                  * Case 1: the vcpu stays in non-root mode. Sending a
6206                  * notification event posts the interrupt to the vcpu.
6207                  *
6208                  * Case 2: the vcpu exits to root mode and is still
6209                  * runnable. The PIR will be synced to the vIRR before the
6210                  * next vcpu entry. Sending a notification event here has
6211                  * no effect, because the vcpu is no longer in non-root
6212                  * mode.
6213                  *
6214                  * Case 3: the vcpu exits to root mode and is blocked.
6215                  * vcpu_block() has already synced the PIR to the vIRR and
6216                  * never blocks the vcpu if the vIRR is not empty. Therefore,
6217                  * a blocked vcpu here is not waiting for any interrupt
6218                  * requested in the PIR, and sending a notification event,
6219                  * which has no effect, is safe here.
6220                  */
6221
6222                 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
6223                 return true;
6224         }
6225 #endif
6226         return false;
6227 }
6228
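/*
 * Deliver an interrupt to L2 through L1's posted-interrupt mechanism when
 * the vector matches L1's notification vector and L2 is running. Returns 0
 * if it did so (the interrupt is either posted by hardware or completed on
 * the next vmentry) and -1 otherwise, in which case the caller falls back
 * to the ordinary posted-interrupt path.
 */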
6229 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
6230                                                 int vector)
6231 {
6232         struct vcpu_vmx *vmx = to_vmx(vcpu);
6233
6234         if (is_guest_mode(vcpu) &&
6235             vector == vmx->nested.posted_intr_nv) {
6236                 /*
6237                  * If the posted interrupt is not recognized by hardware,
6238                  * it will be delivered on the next vmentry.
6239                  */
6240                 vmx->nested.pi_pending = true;
6241                 kvm_make_request(KVM_REQ_EVENT, vcpu);
6242                 /* the PIR and ON have been set by L1. */
6243                 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
6244                         kvm_vcpu_kick(vcpu);
6245                 return 0;
6246         }
6247         return -1;
6248 }
6249 /*
6250  * Send an interrupt to a vcpu via the posted-interrupt mechanism.
6251  * 1. If the target vcpu is running (non-root mode), send a posted-interrupt
6252  * notification to the vcpu; hardware will sync the PIR to the vIRR atomically.
6253  * 2. If the target vcpu isn't running (root mode), kick it so that it picks up
6254  * the interrupt from the PIR on the next vmentry.
6255  */
6256 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
6257 {
6258         struct vcpu_vmx *vmx = to_vmx(vcpu);
6259         int r;
6260
6261         r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
6262         if (!r)
6263                 return;
6264
6265         if (pi_test_and_set_pir(vector, &vmx->pi_desc))
6266                 return;
6267
6268         /* If a previous notification has sent the IPI, nothing to do.  */
6269         if (pi_test_and_set_on(&vmx->pi_desc))
6270                 return;
6271
6272         if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
6273                 kvm_vcpu_kick(vcpu);
6274 }
6275
6276 /*
6277  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
6278  * will not change in the lifetime of the guest.
6279  * Note that host-state that does change is set elsewhere. E.g., host-state
6280  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
6281  */
6282 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
6283 {
6284         u32 low32, high32;
6285         unsigned long tmpl;
6286         struct desc_ptr dt;
6287         unsigned long cr0, cr3, cr4;
6288
6289         cr0 = read_cr0();
6290         WARN_ON(cr0 & X86_CR0_TS);
6291         vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
6292
6293         /*
6294          * Save the most likely value for this task's CR3 in the VMCS.
6295          * We can't use __get_current_cr3_fast() because we're not atomic.
6296          */
6297         cr3 = __read_cr3();
6298         vmcs_writel(HOST_CR3, cr3);             /* 22.2.3  FIXME: shadow tables */
6299         vmx->loaded_vmcs->host_state.cr3 = cr3;
6300
6301         /* Save the most likely value for this task's CR4 in the VMCS. */
6302         cr4 = cr4_read_shadow();
6303         vmcs_writel(HOST_CR4, cr4);                     /* 22.2.3, 22.2.5 */
6304         vmx->loaded_vmcs->host_state.cr4 = cr4;
6305
6306         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
6307 #ifdef CONFIG_X86_64
6308         /*
6309          * Load null selectors, so we can avoid reloading them in
6310          * vmx_prepare_switch_to_host(), in case userspace uses
6311          * the null selectors too (the expected case).
6312          */
6313         vmcs_write16(HOST_DS_SELECTOR, 0);
6314         vmcs_write16(HOST_ES_SELECTOR, 0);
6315 #else
6316         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
6317         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
6318 #endif
6319         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
6320         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
6321
6322         store_idt(&dt);
6323         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
6324         vmx->host_idt_base = dt.address;
6325
6326         vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
6327
6328         rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
6329         vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
6330         rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
6331         vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
6332
6333         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
6334                 rdmsr(MSR_IA32_CR_PAT, low32, high32);
6335                 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
6336         }
6337
6338         if (cpu_has_load_ia32_efer)
6339                 vmcs_write64(HOST_IA32_EFER, host_efer);
6340 }
6341
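/*
 * Bits set in CR4_GUEST_HOST_MASK are owned by the host: guest reads of
 * those bits come from the CR4 read shadow, and guest writes that would
 * change them (relative to the read shadow) cause a VM exit. Clear bits are
 * owned by the guest and go straight to the hardware CR4, so the VMCS field
 * is written with the complement of cr4_guest_owned_bits.
 */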
6342 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
6343 {
6344         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
6345         if (enable_ept)
6346                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
6347         if (is_guest_mode(&vmx->vcpu))
6348                 vmx->vcpu.arch.cr4_guest_owned_bits &=
6349                         ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
6350         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
6351 }
6352
6353 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
6354 {
6355         u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
6356
6357         if (!kvm_vcpu_apicv_active(&vmx->vcpu))
6358                 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
6359
6360         if (!enable_vnmi)
6361                 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
6362
6363         /* Enable the preemption timer dynamically */
6364         pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
6365         return pin_based_exec_ctrl;
6366 }
6367
6368 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
6369 {
6370         struct vcpu_vmx *vmx = to_vmx(vcpu);
6371
6372         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
6373         if (cpu_has_secondary_exec_ctrls()) {
6374                 if (kvm_vcpu_apicv_active(vcpu))
6375                         vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
6376                                       SECONDARY_EXEC_APIC_REGISTER_VIRT |
6377                                       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6378                 else
6379                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
6380                                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
6381                                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6382         }
6383
6384         if (cpu_has_vmx_msr_bitmap())
6385                 vmx_update_msr_bitmap(vcpu);
6386 }
6387
6388 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
6389 {
6390         u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
6391
6392         if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
6393                 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
6394
6395         if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
6396                 exec_control &= ~CPU_BASED_TPR_SHADOW;
6397 #ifdef CONFIG_X86_64
6398                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
6399                                 CPU_BASED_CR8_LOAD_EXITING;
6400 #endif
6401         }
6402         if (!enable_ept)
6403                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
6404                                 CPU_BASED_CR3_LOAD_EXITING  |
6405                                 CPU_BASED_INVLPG_EXITING;
6406         if (kvm_mwait_in_guest(vmx->vcpu.kvm))
6407                 exec_control &= ~(CPU_BASED_MWAIT_EXITING |
6408                                 CPU_BASED_MONITOR_EXITING);
6409         if (kvm_hlt_in_guest(vmx->vcpu.kvm))
6410                 exec_control &= ~CPU_BASED_HLT_EXITING;
6411         return exec_control;
6412 }
6413
6414 static bool vmx_rdrand_supported(void)
6415 {
6416         return vmcs_config.cpu_based_2nd_exec_ctrl &
6417                 SECONDARY_EXEC_RDRAND_EXITING;
6418 }
6419
6420 static bool vmx_rdseed_supported(void)
6421 {
6422         return vmcs_config.cpu_based_2nd_exec_ctrl &
6423                 SECONDARY_EXEC_RDSEED_EXITING;
6424 }
6425
6426 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
6427 {
6428         struct kvm_vcpu *vcpu = &vmx->vcpu;
6429
6430         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
6431
6432         if (!cpu_need_virtualize_apic_accesses(vcpu))
6433                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6434         if (vmx->vpid == 0)
6435                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
6436         if (!enable_ept) {
6437                 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
6438                 enable_unrestricted_guest = 0;
6439         }
6440         if (!enable_unrestricted_guest)
6441                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
6442         if (kvm_pause_in_guest(vmx->vcpu.kvm))
6443                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
6444         if (!kvm_vcpu_apicv_active(vcpu))
6445                 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
6446                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6447         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6448
6449         /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
6450          * in vmx_set_cr4.  */
6451         exec_control &= ~SECONDARY_EXEC_DESC;
6452
6453         /*
6454          * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
6455          * (handle_vmptrld). We cannot enable shadow VMCS here because we
6456          * do not yet have a current VMCS12.
6457          */
6458         exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
6459
6460         if (!enable_pml)
6461                 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
6462
6463         if (vmx_xsaves_supported()) {
6464                 /* Exposing XSAVES only when XSAVE is exposed */
6465                 bool xsaves_enabled =
6466                         guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
6467                         guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
6468
6469                 if (!xsaves_enabled)
6470                         exec_control &= ~SECONDARY_EXEC_XSAVES;
6471
6472                 if (nested) {
6473                         if (xsaves_enabled)
6474                                 vmx->nested.msrs.secondary_ctls_high |=
6475                                         SECONDARY_EXEC_XSAVES;
6476                         else
6477                                 vmx->nested.msrs.secondary_ctls_high &=
6478                                         ~SECONDARY_EXEC_XSAVES;
6479                 }
6480         }
6481
6482         if (vmx_rdtscp_supported()) {
6483                 bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
6484                 if (!rdtscp_enabled)
6485                         exec_control &= ~SECONDARY_EXEC_RDTSCP;
6486
6487                 if (nested) {
6488                         if (rdtscp_enabled)
6489                                 vmx->nested.msrs.secondary_ctls_high |=
6490                                         SECONDARY_EXEC_RDTSCP;
6491                         else
6492                                 vmx->nested.msrs.secondary_ctls_high &=
6493                                         ~SECONDARY_EXEC_RDTSCP;
6494                 }
6495         }
6496
6497         if (vmx_invpcid_supported()) {
6498                 /* Exposing INVPCID only when PCID is exposed */
6499                 bool invpcid_enabled =
6500                         guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
6501                         guest_cpuid_has(vcpu, X86_FEATURE_PCID);
6502
6503                 if (!invpcid_enabled) {
6504                         exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
6505                         guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
6506                 }
6507
6508                 if (nested) {
6509                         if (invpcid_enabled)
6510                                 vmx->nested.msrs.secondary_ctls_high |=
6511                                         SECONDARY_EXEC_ENABLE_INVPCID;
6512                         else
6513                                 vmx->nested.msrs.secondary_ctls_high &=
6514                                         ~SECONDARY_EXEC_ENABLE_INVPCID;
6515                 }
6516         }
6517
6518         if (vmx_rdrand_supported()) {
6519                 bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
6520                 if (rdrand_enabled)
6521                         exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;
6522
6523                 if (nested) {
6524                         if (rdrand_enabled)
6525                                 vmx->nested.msrs.secondary_ctls_high |=
6526                                         SECONDARY_EXEC_RDRAND_EXITING;
6527                         else
6528                                 vmx->nested.msrs.secondary_ctls_high &=
6529                                         ~SECONDARY_EXEC_RDRAND_EXITING;
6530                 }
6531         }
6532
6533         if (vmx_rdseed_supported()) {
6534                 bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
6535                 if (rdseed_enabled)
6536                         exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;
6537
6538                 if (nested) {
6539                         if (rdseed_enabled)
6540                                 vmx->nested.msrs.secondary_ctls_high |=
6541                                         SECONDARY_EXEC_RDSEED_EXITING;
6542                         else
6543                                 vmx->nested.msrs.secondary_ctls_high &=
6544                                         ~SECONDARY_EXEC_RDSEED_EXITING;
6545                 }
6546         }
6547
6548         vmx->secondary_exec_control = exec_control;
6549 }
6550
6551 static void ept_set_mmio_spte_mask(void)
6552 {
6553         /*
6554          * EPT Misconfigurations can be generated if the value of bits 2:0
6555          * of an EPT paging-structure entry is 110b (write/execute).
6556          */
6557         kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
6558                                    VMX_EPT_MISCONFIG_WX_VALUE);
6559 }
6560
6561 #define VMX_XSS_EXIT_BITMAP 0
6562 /*
6563  * Set up the constant parts of the VMCS (controls, host state and MSR areas).
6564  */
6565 static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
6566 {
6567         int i;
6568
6569         if (enable_shadow_vmcs) {
6570                 /*
6571                  * At vCPU creation, "VMWRITE to any supported field
6572                  * in the VMCS" is supported, so use the more
6573                  * permissive vmx_vmread_bitmap to specify both read
6574                  * and write permissions for the shadow VMCS.
6575                  */
6576                 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6577                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
6578         }
6579         if (cpu_has_vmx_msr_bitmap())
6580                 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
6581
6582         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
6583
6584         /* Control */
6585         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
6586         vmx->hv_deadline_tsc = -1;
6587
6588         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
6589
6590         if (cpu_has_secondary_exec_ctrls()) {
6591                 vmx_compute_secondary_exec_control(vmx);
6592                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
6593                              vmx->secondary_exec_control);
6594         }
6595
6596         if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
6597                 vmcs_write64(EOI_EXIT_BITMAP0, 0);
6598                 vmcs_write64(EOI_EXIT_BITMAP1, 0);
6599                 vmcs_write64(EOI_EXIT_BITMAP2, 0);
6600                 vmcs_write64(EOI_EXIT_BITMAP3, 0);
6601
6602                 vmcs_write16(GUEST_INTR_STATUS, 0);
6603
6604                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
6605                 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
6606         }
6607
6608         if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
6609                 vmcs_write32(PLE_GAP, ple_gap);
6610                 vmx->ple_window = ple_window;
6611                 vmx->ple_window_dirty = true;
6612         }
6613
6614         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
6615         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
6616         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
6617
6618         vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
6619         vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
6620         vmx_set_constant_host_state(vmx);
6621         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
6622         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
6623
6624         if (cpu_has_vmx_vmfunc())
6625                 vmcs_write64(VM_FUNCTION_CONTROL, 0);
6626
6627         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
6628         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
6629         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
6630         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
6631         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
6632
6633         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
6634                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
6635
6636         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
6637                 u32 index = vmx_msr_index[i];
6638                 u32 data_low, data_high;
6639                 int j = vmx->nmsrs;
6640
6641                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
6642                         continue;
6643                 if (wrmsr_safe(index, data_low, data_high) < 0)
6644                         continue;
6645                 vmx->guest_msrs[j].index = i;
6646                 vmx->guest_msrs[j].data = 0;
6647                 vmx->guest_msrs[j].mask = -1ull;
6648                 ++vmx->nmsrs;
6649         }
6650
6651         vmx->arch_capabilities = kvm_get_arch_capabilities();
6652
6653         vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
6654
6655         /* 22.2.1, 20.8.1 */
6656         vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
6657
6658         vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
6659         vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
6660
6661         set_cr4_guest_host_mask(vmx);
6662
6663         if (vmx_xsaves_supported())
6664                 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
6665
6666         if (enable_pml) {
6667                 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
6668                 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
6669         }
6670
6671         if (cpu_has_vmx_encls_vmexit())
6672                 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
6673 }
6674
6675 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
6676 {
6677         struct vcpu_vmx *vmx = to_vmx(vcpu);
6678         struct msr_data apic_base_msr;
6679         u64 cr0;
6680
6681         vmx->rmode.vm86_active = 0;
6682         vmx->spec_ctrl = 0;
6683
6684         vcpu->arch.microcode_version = 0x100000000ULL;
6685         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
6686         kvm_set_cr8(vcpu, 0);
6687
6688         if (!init_event) {
6689                 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
6690                                      MSR_IA32_APICBASE_ENABLE;
6691                 if (kvm_vcpu_is_reset_bsp(vcpu))
6692                         apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
6693                 apic_base_msr.host_initiated = true;
6694                 kvm_set_apic_base(vcpu, &apic_base_msr);
6695         }
6696
6697         vmx_segment_cache_clear(vmx);
6698
6699         seg_setup(VCPU_SREG_CS);
6700         vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
6701         vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
6702
6703         seg_setup(VCPU_SREG_DS);
6704         seg_setup(VCPU_SREG_ES);
6705         seg_setup(VCPU_SREG_FS);
6706         seg_setup(VCPU_SREG_GS);
6707         seg_setup(VCPU_SREG_SS);
6708
6709         vmcs_write16(GUEST_TR_SELECTOR, 0);
6710         vmcs_writel(GUEST_TR_BASE, 0);
6711         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
6712         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
6713
6714         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
6715         vmcs_writel(GUEST_LDTR_BASE, 0);
6716         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
6717         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
6718
6719         if (!init_event) {
6720                 vmcs_write32(GUEST_SYSENTER_CS, 0);
6721                 vmcs_writel(GUEST_SYSENTER_ESP, 0);
6722                 vmcs_writel(GUEST_SYSENTER_EIP, 0);
6723                 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
6724         }
6725
6726         kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
6727         kvm_rip_write(vcpu, 0xfff0);
6728
6729         vmcs_writel(GUEST_GDTR_BASE, 0);
6730         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
6731
6732         vmcs_writel(GUEST_IDTR_BASE, 0);
6733         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
6734
6735         vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
6736         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
6737         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
6738         if (kvm_mpx_supported())
6739                 vmcs_write64(GUEST_BNDCFGS, 0);
6740
6741         setup_msrs(vmx);
6742
6743         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
6744
6745         if (cpu_has_vmx_tpr_shadow() && !init_event) {
6746                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
6747                 if (cpu_need_tpr_shadow(vcpu))
6748                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
6749                                      __pa(vcpu->arch.apic->regs));
6750                 vmcs_write32(TPR_THRESHOLD, 0);
6751         }
6752
6753         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6754
6755         if (vmx->vpid != 0)
6756                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
6757
6758         cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
6759         vmx->vcpu.arch.cr0 = cr0;
6760         vmx_set_cr0(vcpu, cr0); /* enter rmode */
6761         vmx_set_cr4(vcpu, 0);
6762         vmx_set_efer(vcpu, 0);
6763
6764         update_exception_bitmap(vcpu);
6765
6766         vpid_sync_context(vmx->vpid);
6767         if (init_event)
6768                 vmx_clear_hlt(vcpu);
6769 }
6770
6771 /*
6772  * In nested virtualization, check if L1 asked to exit on external interrupts.
6773  * For most existing hypervisors, this will always return true.
6774  */
6775 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
6776 {
6777         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
6778                 PIN_BASED_EXT_INTR_MASK;
6779 }
6780
6781 /*
6782  * In nested virtualization, check if L1 has set
6783  * VM_EXIT_ACK_INTR_ON_EXIT
6784  */
6785 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
6786 {
6787         return get_vmcs12(vcpu)->vm_exit_controls &
6788                 VM_EXIT_ACK_INTR_ON_EXIT;
6789 }
6790
6791 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
6792 {
6793         return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
6794 }
6795
6796 static void enable_irq_window(struct kvm_vcpu *vcpu)
6797 {
6798         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
6799                       CPU_BASED_VIRTUAL_INTR_PENDING);
6800 }
6801
6802 static void enable_nmi_window(struct kvm_vcpu *vcpu)
6803 {
6804         if (!enable_vnmi ||
6805             vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
6806                 enable_irq_window(vcpu);
6807                 return;
6808         }
6809
6810         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
6811                       CPU_BASED_VIRTUAL_NMI_PENDING);
6812 }
6813
6814 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
6815 {
6816         struct vcpu_vmx *vmx = to_vmx(vcpu);
6817         uint32_t intr;
6818         int irq = vcpu->arch.interrupt.nr;
6819
6820         trace_kvm_inj_virq(irq);
6821
6822         ++vcpu->stat.irq_injections;
6823         if (vmx->rmode.vm86_active) {
6824                 int inc_eip = 0;
6825                 if (vcpu->arch.interrupt.soft)
6826                         inc_eip = vcpu->arch.event_exit_inst_len;
6827                 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
6828                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6829                 return;
6830         }
6831         intr = irq | INTR_INFO_VALID_MASK;
6832         if (vcpu->arch.interrupt.soft) {
6833                 intr |= INTR_TYPE_SOFT_INTR;
6834                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
6835                              vmx->vcpu.arch.event_exit_inst_len);
6836         } else
6837                 intr |= INTR_TYPE_EXT_INTR;
6838         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
6839
6840         vmx_clear_hlt(vcpu);
6841 }
6842
6843 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
6844 {
6845         struct vcpu_vmx *vmx = to_vmx(vcpu);
6846
6847         if (!enable_vnmi) {
6848                 /*
6849                  * Tracking the NMI-blocked state in software is built upon
6850                  * finding the next open IRQ window. This, in turn, depends on
6851                  * well-behaving guests: They have to keep IRQs disabled at
6852                  * least as long as the NMI handler runs. Otherwise we may
6853                  * cause NMI nesting, maybe breaking the guest. But as this is
6854                  * highly unlikely, we can live with the residual risk.
6855                  */
6856                 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
6857                 vmx->loaded_vmcs->vnmi_blocked_time = 0;
6858         }
6859
6860         ++vcpu->stat.nmi_injections;
6861         vmx->loaded_vmcs->nmi_known_unmasked = false;
6862
6863         if (vmx->rmode.vm86_active) {
6864                 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
6865                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
6866                 return;
6867         }
6868
6869         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
6870                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
6871
6872         vmx_clear_hlt(vcpu);
6873 }
6874
6875 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
6876 {
6877         struct vcpu_vmx *vmx = to_vmx(vcpu);
6878         bool masked;
6879
6880         if (!enable_vnmi)
6881                 return vmx->loaded_vmcs->soft_vnmi_blocked;
6882         if (vmx->loaded_vmcs->nmi_known_unmasked)
6883                 return false;
6884         masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
6885         vmx->loaded_vmcs->nmi_known_unmasked = !masked;
6886         return masked;
6887 }
6888
6889 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
6890 {
6891         struct vcpu_vmx *vmx = to_vmx(vcpu);
6892
6893         if (!enable_vnmi) {
6894                 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
6895                         vmx->loaded_vmcs->soft_vnmi_blocked = masked;
6896                         vmx->loaded_vmcs->vnmi_blocked_time = 0;
6897                 }
6898         } else {
6899                 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
6900                 if (masked)
6901                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6902                                       GUEST_INTR_STATE_NMI);
6903                 else
6904                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
6905                                         GUEST_INTR_STATE_NMI);
6906         }
6907 }
6908
6909 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
6910 {
6911         if (to_vmx(vcpu)->nested.nested_run_pending)
6912                 return 0;
6913
6914         if (!enable_vnmi &&
6915             to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
6916                 return 0;
6917
6918         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
6919                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
6920                    | GUEST_INTR_STATE_NMI));
6921 }
6922
6923 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
6924 {
6925         return (!to_vmx(vcpu)->nested.nested_run_pending &&
6926                 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
6927                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
6928                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
6929 }
6930
6931 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
6932 {
6933         int ret;
6934
6935         if (enable_unrestricted_guest)
6936                 return 0;
6937
6938         ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
6939                                     PAGE_SIZE * 3);
6940         if (ret)
6941                 return ret;
6942         to_kvm_vmx(kvm)->tss_addr = addr;
6943         return init_rmode_tss(kvm);
6944 }
6945
6946 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
6947 {
6948         to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
6949         return 0;
6950 }
6951
6952 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
6953 {
6954         switch (vec) {
6955         case BP_VECTOR:
6956                 /*
6957                  * Update instruction length as we may reinject the exception
6958                  * from user space while in guest debugging mode.
6959                  */
6960                 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
6961                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
6962                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
6963                         return false;
6964                 /* fall through */
6965         case DB_VECTOR:
6966                 if (vcpu->guest_debug &
6967                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
6968                         return false;
6969                 /* fall through */
6970         case DE_VECTOR:
6971         case OF_VECTOR:
6972         case BR_VECTOR:
6973         case UD_VECTOR:
6974         case DF_VECTOR:
6975         case SS_VECTOR:
6976         case GP_VECTOR:
6977         case MF_VECTOR:
6978                 return true;
6979         break;
6980         }
6981         return false;
6982 }
6983
6984 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
6985                                   int vec, u32 err_code)
6986 {
6987         /*
6988          * An instruction with the address-size override prefix (opcode 0x67)
6989          * causes a #SS fault with error code 0 in VM86 mode.
6990          */
6991         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
6992                 if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
6993                         if (vcpu->arch.halt_request) {
6994                                 vcpu->arch.halt_request = 0;
6995                                 return kvm_vcpu_halt(vcpu);
6996                         }
6997                         return 1;
6998                 }
6999                 return 0;
7000         }
7001
7002         /*
7003          * Forward all other exceptions that are valid in real mode.
7004          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
7005          *        the required debugging infrastructure rework.
7006          */
7007         kvm_queue_exception(vcpu, vec);
7008         return 1;
7009 }
7010
7011 /*
7012  * Trigger a machine check on the host. We assume all the MSRs are already set
7013  * up by the CPU and that we are still running on the same CPU on which the MCE
7014  * occurred. We pass a fake environment to the machine-check handler because we
7015  * want the guest always to be treated like user space, no matter what context
7016  * it used internally.
7017  */
7018 static void kvm_machine_check(void)
7019 {
7020 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
7021         struct pt_regs regs = {
7022                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
7023                 .flags = X86_EFLAGS_IF,
7024         };
7025
7026         do_machine_check(&regs, 0);
7027 #endif
7028 }
7029
7030 static int handle_machine_check(struct kvm_vcpu *vcpu)
7031 {
7032         /* already handled by vcpu_run */
7033         return 1;
7034 }
7035
7036 static int handle_exception(struct kvm_vcpu *vcpu)
7037 {
7038         struct vcpu_vmx *vmx = to_vmx(vcpu);
7039         struct kvm_run *kvm_run = vcpu->run;
7040         u32 intr_info, ex_no, error_code;
7041         unsigned long cr2, rip, dr6;
7042         u32 vect_info;
7043         enum emulation_result er;
7044
7045         vect_info = vmx->idt_vectoring_info;
7046         intr_info = vmx->exit_intr_info;
7047
7048         if (is_machine_check(intr_info))
7049                 return handle_machine_check(vcpu);
7050
7051         if (is_nmi(intr_info))
7052                 return 1;  /* already handled by vmx_vcpu_run() */
7053
7054         if (is_invalid_opcode(intr_info))
7055                 return handle_ud(vcpu);
7056
7057         error_code = 0;
7058         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
7059                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
7060
7061         if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
7062                 WARN_ON_ONCE(!enable_vmware_backdoor);
7063                 er = kvm_emulate_instruction(vcpu,
7064                         EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
7065                 if (er == EMULATE_USER_EXIT)
7066                         return 0;
7067                 else if (er != EMULATE_DONE)
7068                         kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
7069                 return 1;
7070         }
7071
7072         /*
7073          * A #PF with PFEC.RSVD = 1 indicates that the guest is accessing
7074          * MMIO; it is better to report an internal error.
7075          * See the comments in vmx_handle_exit.
7076          */
7077         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
7078             !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
7079                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7080                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
7081                 vcpu->run->internal.ndata = 3;
7082                 vcpu->run->internal.data[0] = vect_info;
7083                 vcpu->run->internal.data[1] = intr_info;
7084                 vcpu->run->internal.data[2] = error_code;
7085                 return 0;
7086         }
7087
7088         if (is_page_fault(intr_info)) {
7089                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
7090                 /* EPT won't cause page fault directly */
7091                 WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
7092                 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
7093         }
7094
7095         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
7096
7097         if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
7098                 return handle_rmode_exception(vcpu, ex_no, error_code);
7099
7100         switch (ex_no) {
7101         case AC_VECTOR:
7102                 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
7103                 return 1;
7104         case DB_VECTOR:
7105                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
7106                 if (!(vcpu->guest_debug &
7107                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
7108                         vcpu->arch.dr6 &= ~15;
7109                         vcpu->arch.dr6 |= dr6 | DR6_RTM;
7110                         if (is_icebp(intr_info))
7111                                 skip_emulated_instruction(vcpu);
7112
7113                         kvm_queue_exception(vcpu, DB_VECTOR);
7114                         return 1;
7115                 }
7116                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
7117                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
7118                 /* fall through */
7119         case BP_VECTOR:
7120                 /*
7121                  * Update instruction length as we may reinject #BP from
7122                  * user space while in guest debugging mode. Reading it for
7123                  * #DB as well causes no harm; it is not used in that case.
7124                  */
7125                 vmx->vcpu.arch.event_exit_inst_len =
7126                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
7127                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
7128                 rip = kvm_rip_read(vcpu);
7129                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
7130                 kvm_run->debug.arch.exception = ex_no;
7131                 break;
7132         default:
7133                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
7134                 kvm_run->ex.exception = ex_no;
7135                 kvm_run->ex.error_code = error_code;
7136                 break;
7137         }
7138         return 0;
7139 }
7140
7141 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
7142 {
7143         ++vcpu->stat.irq_exits;
7144         return 1;
7145 }
7146
7147 static int handle_triple_fault(struct kvm_vcpu *vcpu)
7148 {
7149         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
7150         vcpu->mmio_needed = 0;
7151         return 0;
7152 }
7153
7154 static int handle_io(struct kvm_vcpu *vcpu)
7155 {
7156         unsigned long exit_qualification;
7157         int size, in, string;
7158         unsigned port;
7159
7160         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7161         string = (exit_qualification & 16) != 0;
7162
7163         ++vcpu->stat.io_exits;
7164
7165         if (string)
7166                 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7167
7168         port = exit_qualification >> 16;
7169         size = (exit_qualification & 7) + 1;
7170         in = (exit_qualification & 8) != 0;
7171
7172         return kvm_fast_pio(vcpu, size, port, in);
7173 }
7174
7175 static void
7176 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
7177 {
7178         /*
7179          * Patch in the VMCALL instruction:
7180          */
7181         hypercall[0] = 0x0f;
7182         hypercall[1] = 0x01;
7183         hypercall[2] = 0xc1;
7184 }
7185
7186 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
7187 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
7188 {
7189         if (is_guest_mode(vcpu)) {
7190                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7191                 unsigned long orig_val = val;
7192
7193                 /*
7194                  * We get here when L2 changed cr0 in a way that did not change
7195                  * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
7196                  * but did change L0 shadowed bits. So we first calculate the
7197                  * effective cr0 value that L1 would like to write into the
7198                  * hardware. It consists of the L2-owned bits from the new
7199                  * value combined with the L1-owned bits from L1's guest_cr0.
7200                  */
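                     /*
                      * For illustration: if only CR0.TS (bit 3) is set in
                      * cr0_guest_host_mask, the effective value computed below
                      * is L2's new value with the TS bit replaced by the TS bit
                      * from vmcs12->guest_cr0; all other bits come from L2's
                      * write.
                      */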
7201                 val = (val & ~vmcs12->cr0_guest_host_mask) |
7202                         (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
7203
7204                 if (!nested_guest_cr0_valid(vcpu, val))
7205                         return 1;
7206
7207                 if (kvm_set_cr0(vcpu, val))
7208                         return 1;
7209                 vmcs_writel(CR0_READ_SHADOW, orig_val);
7210                 return 0;
7211         } else {
7212                 if (to_vmx(vcpu)->nested.vmxon &&
7213                     !nested_host_cr0_valid(vcpu, val))
7214                         return 1;
7215
7216                 return kvm_set_cr0(vcpu, val);
7217         }
7218 }
7219
7220 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
7221 {
7222         if (is_guest_mode(vcpu)) {
7223                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7224                 unsigned long orig_val = val;
7225
7226                 /* analogously to handle_set_cr0 */
7227                 val = (val & ~vmcs12->cr4_guest_host_mask) |
7228                         (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
7229                 if (kvm_set_cr4(vcpu, val))
7230                         return 1;
7231                 vmcs_writel(CR4_READ_SHADOW, orig_val);
7232                 return 0;
7233         } else
7234                 return kvm_set_cr4(vcpu, val);
7235 }
7236
7237 static int handle_desc(struct kvm_vcpu *vcpu)
7238 {
7239         WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
7240         return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7241 }
7242
7243 static int handle_cr(struct kvm_vcpu *vcpu)
7244 {
7245         unsigned long exit_qualification, val;
7246         int cr;
7247         int reg;
7248         int err;
7249         int ret;
7250
7251         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
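             /*
              * Control-register exit qualification layout decoded below:
              *   bits 3:0   - control register number
              *   bits 5:4   - access type (0 = MOV to CR, 1 = MOV from CR,
              *                2 = CLTS, 3 = LMSW)
              *   bits 11:8  - GPR operand for MOV
              *   bits 31:16 - LMSW source data
              */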
7252         cr = exit_qualification & 15;
7253         reg = (exit_qualification >> 8) & 15;
7254         switch ((exit_qualification >> 4) & 3) {
7255         case 0: /* mov to cr */
7256                 val = kvm_register_readl(vcpu, reg);
7257                 trace_kvm_cr_write(cr, val);
7258                 switch (cr) {
7259                 case 0:
7260                         err = handle_set_cr0(vcpu, val);
7261                         return kvm_complete_insn_gp(vcpu, err);
7262                 case 3:
7263                         WARN_ON_ONCE(enable_unrestricted_guest);
7264                         err = kvm_set_cr3(vcpu, val);
7265                         return kvm_complete_insn_gp(vcpu, err);
7266                 case 4:
7267                         err = handle_set_cr4(vcpu, val);
7268                         return kvm_complete_insn_gp(vcpu, err);
7269                 case 8: {
7270                                 u8 cr8_prev = kvm_get_cr8(vcpu);
7271                                 u8 cr8 = (u8)val;
7272                                 err = kvm_set_cr8(vcpu, cr8);
7273                                 ret = kvm_complete_insn_gp(vcpu, err);
7274                                 if (lapic_in_kernel(vcpu))
7275                                         return ret;
7276                                 if (cr8_prev <= cr8)
7277                                         return ret;
7278                                 /*
7279                                  * TODO: we might be squashing a
7280                                  * KVM_GUESTDBG_SINGLESTEP-triggered
7281                                  * KVM_EXIT_DEBUG here.
7282                                  */
7283                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
7284                                 return 0;
7285                         }
7286                 }
7287                 break;
7288         case 2: /* clts */
7289                 WARN_ONCE(1, "Guest should always own CR0.TS");
7290                 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
7291                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
7292                 return kvm_skip_emulated_instruction(vcpu);
7293         case 1: /*mov from cr*/
7294                 switch (cr) {
7295                 case 3:
7296                         WARN_ON_ONCE(enable_unrestricted_guest);
7297                         val = kvm_read_cr3(vcpu);
7298                         kvm_register_write(vcpu, reg, val);
7299                         trace_kvm_cr_read(cr, val);
7300                         return kvm_skip_emulated_instruction(vcpu);
7301                 case 8:
7302                         val = kvm_get_cr8(vcpu);
7303                         kvm_register_write(vcpu, reg, val);
7304                         trace_kvm_cr_read(cr, val);
7305                         return kvm_skip_emulated_instruction(vcpu);
7306                 }
7307                 break;
7308         case 3: /* lmsw */
7309                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
7310                 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
7311                 kvm_lmsw(vcpu, val);
7312
7313                 return kvm_skip_emulated_instruction(vcpu);
7314         default:
7315                 break;
7316         }
7317         vcpu->run->exit_reason = 0;
7318         vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
7319                (int)(exit_qualification >> 4) & 3, cr);
7320         return 0;
7321 }
7322
7323 static int handle_dr(struct kvm_vcpu *vcpu)
7324 {
7325         unsigned long exit_qualification;
7326         int dr, dr7, reg;
7327
7328         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7329         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
7330
7331         /* First, if DR does not exist, trigger UD */
7332         if (!kvm_require_dr(vcpu, dr))
7333                 return 1;
7334
7335         /* Do not handle if CPL > 0; a #GP will be triggered on re-entry */
7336         if (!kvm_require_cpl(vcpu, 0))
7337                 return 1;
7338         dr7 = vmcs_readl(GUEST_DR7);
7339         if (dr7 & DR7_GD) {
7340                 /*
7341                  * As the vm-exit takes precedence over the debug trap, we
7342                  * need to emulate the latter, either for the host or the
7343                  * guest debugging itself.
7344                  */
7345                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
7346                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
7347                         vcpu->run->debug.arch.dr7 = dr7;
7348                         vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
7349                         vcpu->run->debug.arch.exception = DB_VECTOR;
7350                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
7351                         return 0;
7352                 } else {
7353                         vcpu->arch.dr6 &= ~15;
7354                         vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
7355                         kvm_queue_exception(vcpu, DB_VECTOR);
7356                         return 1;
7357                 }
7358         }
7359
7360         if (vcpu->guest_debug == 0) {
7361                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7362                                 CPU_BASED_MOV_DR_EXITING);
7363
7364                 /*
7365                  * No more DR vmexits; force a reload of the debug registers
7366                  * and reenter on this instruction.  The next vmexit will
7367                  * retrieve the full state of the debug registers.
7368                  */
7369                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
7370                 return 1;
7371         }
7372
7373         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
7374         if (exit_qualification & TYPE_MOV_FROM_DR) {
7375                 unsigned long val;
7376
7377                 if (kvm_get_dr(vcpu, dr, &val))
7378                         return 1;
7379                 kvm_register_write(vcpu, reg, val);
7380         } else
7381                 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
7382                         return 1;
7383
7384         return kvm_skip_emulated_instruction(vcpu);
7385 }
7386
7387 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
7388 {
7389         return vcpu->arch.dr6;
7390 }
7391
7392 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
7393 {
7394 }
7395
7396 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
7397 {
7398         get_debugreg(vcpu->arch.db[0], 0);
7399         get_debugreg(vcpu->arch.db[1], 1);
7400         get_debugreg(vcpu->arch.db[2], 2);
7401         get_debugreg(vcpu->arch.db[3], 3);
7402         get_debugreg(vcpu->arch.dr6, 6);
7403         vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
7404
7405         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
7406         vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
7407 }
7408
7409 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
7410 {
7411         vmcs_writel(GUEST_DR7, val);
7412 }
7413
7414 static int handle_cpuid(struct kvm_vcpu *vcpu)
7415 {
7416         return kvm_emulate_cpuid(vcpu);
7417 }
7418
7419 static int handle_rdmsr(struct kvm_vcpu *vcpu)
7420 {
7421         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
7422         struct msr_data msr_info;
7423
7424         msr_info.index = ecx;
7425         msr_info.host_initiated = false;
7426         if (vmx_get_msr(vcpu, &msr_info)) {
7427                 trace_kvm_msr_read_ex(ecx);
7428                 kvm_inject_gp(vcpu, 0);
7429                 return 1;
7430         }
7431
7432         trace_kvm_msr_read(ecx, msr_info.data);
7433
7434         /* FIXME: handling of bits 32:63 of rax, rdx */
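             /* -1u promotes to 0xffffffff, so "& -1u" keeps only bits 31:0. */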
7435         vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
7436         vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
7437         return kvm_skip_emulated_instruction(vcpu);
7438 }
7439
7440 static int handle_wrmsr(struct kvm_vcpu *vcpu)
7441 {
7442         struct msr_data msr;
7443         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
7444         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
7445                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
7446
7447         msr.data = data;
7448         msr.index = ecx;
7449         msr.host_initiated = false;
7450         if (kvm_set_msr(vcpu, &msr) != 0) {
7451                 trace_kvm_msr_write_ex(ecx, data);
7452                 kvm_inject_gp(vcpu, 0);
7453                 return 1;
7454         }
7455
7456         trace_kvm_msr_write(ecx, data);
7457         return kvm_skip_emulated_instruction(vcpu);
7458 }
7459
7460 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
7461 {
7462         kvm_apic_update_ppr(vcpu);
7463         return 1;
7464 }
7465
7466 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
7467 {
7468         vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7469                         CPU_BASED_VIRTUAL_INTR_PENDING);
7470
7471         kvm_make_request(KVM_REQ_EVENT, vcpu);
7472
7473         ++vcpu->stat.irq_window_exits;
7474         return 1;
7475 }
7476
7477 static int handle_halt(struct kvm_vcpu *vcpu)
7478 {
7479         return kvm_emulate_halt(vcpu);
7480 }
7481
7482 static int handle_vmcall(struct kvm_vcpu *vcpu)
7483 {
7484         return kvm_emulate_hypercall(vcpu);
7485 }
7486
7487 static int handle_invd(struct kvm_vcpu *vcpu)
7488 {
7489         return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7490 }
7491
7492 static int handle_invlpg(struct kvm_vcpu *vcpu)
7493 {
7494         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7495
7496         kvm_mmu_invlpg(vcpu, exit_qualification);
7497         return kvm_skip_emulated_instruction(vcpu);
7498 }
7499
7500 static int handle_rdpmc(struct kvm_vcpu *vcpu)
7501 {
7502         int err;
7503
7504         err = kvm_rdpmc(vcpu);
7505         return kvm_complete_insn_gp(vcpu, err);
7506 }
7507
7508 static int handle_wbinvd(struct kvm_vcpu *vcpu)
7509 {
7510         return kvm_emulate_wbinvd(vcpu);
7511 }
7512
7513 static int handle_xsetbv(struct kvm_vcpu *vcpu)
7514 {
7515         u64 new_bv = kvm_read_edx_eax(vcpu);
7516         u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
7517
7518         if (kvm_set_xcr(vcpu, index, new_bv) == 0)
7519                 return kvm_skip_emulated_instruction(vcpu);
7520         return 1;
7521 }
7522
7523 static int handle_xsaves(struct kvm_vcpu *vcpu)
7524 {
7525         kvm_skip_emulated_instruction(vcpu);
7526         WARN(1, "this should never happen\n");
7527         return 1;
7528 }
7529
7530 static int handle_xrstors(struct kvm_vcpu *vcpu)
7531 {
7532         kvm_skip_emulated_instruction(vcpu);
7533         WARN(1, "this should never happen\n");
7534         return 1;
7535 }
7536
7537 static int handle_apic_access(struct kvm_vcpu *vcpu)
7538 {
7539         if (likely(fasteoi)) {
7540                 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7541                 int access_type, offset;
7542
7543                 access_type = exit_qualification & APIC_ACCESS_TYPE;
7544                 offset = exit_qualification & APIC_ACCESS_OFFSET;
7545                 /*
7546                  * A sane guest uses MOV to write the EOI register, and the
7547                  * written value is ignored. Short-circuit that case here to
7548                  * avoid heavy instruction emulation.
7549                  */
7550                 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
7551                     (offset == APIC_EOI)) {
7552                         kvm_lapic_set_eoi(vcpu);
7553                         return kvm_skip_emulated_instruction(vcpu);
7554                 }
7555         }
7556         return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7557 }
7558
7559 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
7560 {
7561         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7562         int vector = exit_qualification & 0xff;
7563
7564         /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
7565         kvm_apic_set_eoi_accelerated(vcpu, vector);
7566         return 1;
7567 }
7568
7569 static int handle_apic_write(struct kvm_vcpu *vcpu)
7570 {
7571         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7572         u32 offset = exit_qualification & 0xfff;
7573
7574         /* APIC-write VM exit is trap-like and thus no need to adjust IP */
7575         kvm_apic_write_nodecode(vcpu, offset);
7576         return 1;
7577 }
7578
7579 static int handle_task_switch(struct kvm_vcpu *vcpu)
7580 {
7581         struct vcpu_vmx *vmx = to_vmx(vcpu);
7582         unsigned long exit_qualification;
7583         bool has_error_code = false;
7584         u32 error_code = 0;
7585         u16 tss_selector;
7586         int reason, type, idt_v, idt_index;
7587
7588         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
7589         idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
7590         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
7591
7592         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7593
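             /*
              * Task-switch exit qualification: bits 15:0 hold the target TSS
              * selector, bits 31:30 the source of the task switch (CALL,
              * IRET, JMP or a task gate in the IDT).
              */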
7594         reason = (u32)exit_qualification >> 30;
7595         if (reason == TASK_SWITCH_GATE && idt_v) {
7596                 switch (type) {
7597                 case INTR_TYPE_NMI_INTR:
7598                         vcpu->arch.nmi_injected = false;
7599                         vmx_set_nmi_mask(vcpu, true);
7600                         break;
7601                 case INTR_TYPE_EXT_INTR:
7602                 case INTR_TYPE_SOFT_INTR:
7603                         kvm_clear_interrupt_queue(vcpu);
7604                         break;
7605                 case INTR_TYPE_HARD_EXCEPTION:
7606                         if (vmx->idt_vectoring_info &
7607                             VECTORING_INFO_DELIVER_CODE_MASK) {
7608                                 has_error_code = true;
7609                                 error_code =
7610                                         vmcs_read32(IDT_VECTORING_ERROR_CODE);
7611                         }
7612                         /* fall through */
7613                 case INTR_TYPE_SOFT_EXCEPTION:
7614                         kvm_clear_exception_queue(vcpu);
7615                         break;
7616                 default:
7617                         break;
7618                 }
7619         }
7620         tss_selector = exit_qualification;
7621
7622         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
7623                        type != INTR_TYPE_EXT_INTR &&
7624                        type != INTR_TYPE_NMI_INTR))
7625                 skip_emulated_instruction(vcpu);
7626
7627         if (kvm_task_switch(vcpu, tss_selector,
7628                             type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
7629                             has_error_code, error_code) == EMULATE_FAIL) {
7630                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7631                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7632                 vcpu->run->internal.ndata = 0;
7633                 return 0;
7634         }
7635
7636         /*
7637          * TODO: What about debug traps on tss switch?
7638          *       Are we supposed to inject them and update dr6?
7639          */
7640
7641         return 1;
7642 }
7643
7644 static int handle_ept_violation(struct kvm_vcpu *vcpu)
7645 {
7646         unsigned long exit_qualification;
7647         gpa_t gpa;
7648         u64 error_code;
7649
7650         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7651
7652         /*
7653          * If the EPT violation happened while executing IRET from an NMI,
7654          * the "blocked by NMI" bit has to be set before the next VM entry.
7655          * There are errata that may cause this bit to not be set by
7656          * hardware: AAK134, BY25.
7657          */
7658         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
7659                         enable_vnmi &&
7660                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
7661                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
7662
7663         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
7664         trace_kvm_page_fault(gpa, exit_qualification);
7665
7666         /* Is it a read fault? */
7667         error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
7668                      ? PFERR_USER_MASK : 0;
7669         /* Is it a write fault? */
7670         error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
7671                       ? PFERR_WRITE_MASK : 0;
7672         /* Is it a fetch fault? */
7673         error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
7674                       ? PFERR_FETCH_MASK : 0;
7675         /* ept page table entry is present? */
7676         error_code |= (exit_qualification &
7677                        (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
7678                         EPT_VIOLATION_EXECUTABLE))
7679                       ? PFERR_PRESENT_MASK : 0;
7680
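             /*
              * Bit 8 of the exit qualification distinguishes an access to the
              * final guest-physical translation (1) from an access to a guest
              * paging-structure entry made during the guest page walk (0).
              */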
7681         error_code |= (exit_qualification & 0x100) != 0 ?
7682                PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
7683
7684         vcpu->arch.exit_qualification = exit_qualification;
7685         return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
7686 }
7687
7688 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
7689 {
7690         gpa_t gpa;
7691
7692         /*
7693          * A nested guest cannot optimize MMIO vmexits, because we have an
7694          * nGPA here instead of the required GPA.
7695          */
7696         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
7697         if (!is_guest_mode(vcpu) &&
7698             !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
7699                 trace_kvm_fast_mmio(gpa);
7700                 /*
7701                  * Doing kvm_skip_emulated_instruction() depends on undefined
7702                  * behavior: Intel's manual doesn't mandate that
7703                  * VM_EXIT_INSTRUCTION_LEN be set in the VMCS when an EPT
7704                  * misconfig occurs. Real hardware was observed to set it, but
7705                  * other hypervisors (namely Hyper-V) don't, so we would end up
7706                  * advancing the IP by some random value. Disable fast MMIO when
7707                  * running nested and keep it for real hardware, in the hope that
7708                  * VM_EXIT_INSTRUCTION_LEN will always be set correctly.
7709                  */
7710                 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
7711                         return kvm_skip_emulated_instruction(vcpu);
7712                 else
7713                         return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
7714                                                                 EMULATE_DONE;
7715         }
7716
7717         return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
7718 }
7719
7720 static int handle_nmi_window(struct kvm_vcpu *vcpu)
7721 {
7722         WARN_ON_ONCE(!enable_vnmi);
7723         vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7724                         CPU_BASED_VIRTUAL_NMI_PENDING);
7725         ++vcpu->stat.nmi_window_exits;
7726         kvm_make_request(KVM_REQ_EVENT, vcpu);
7727
7728         return 1;
7729 }
7730
7731 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
7732 {
7733         struct vcpu_vmx *vmx = to_vmx(vcpu);
7734         enum emulation_result err = EMULATE_DONE;
7735         int ret = 1;
7736         u32 cpu_exec_ctrl;
7737         bool intr_window_requested;
7738         unsigned count = 130;
7739
7740         /*
7741          * We should never reach the point where we are emulating L2
7742          * due to invalid guest state as that means we incorrectly
7743          * allowed a nested VMEntry with an invalid vmcs12.
7744          */
7745         WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
7746
7747         cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
7748         intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
7749
7750         while (vmx->emulation_required && count-- != 0) {
7751                 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
7752                         return handle_interrupt_window(&vmx->vcpu);
7753
7754                 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
7755                         return 1;
7756
7757                 err = kvm_emulate_instruction(vcpu, 0);
7758
7759                 if (err == EMULATE_USER_EXIT) {
7760                         ++vcpu->stat.mmio_exits;
7761                         ret = 0;
7762                         goto out;
7763                 }
7764
7765                 if (err != EMULATE_DONE)
7766                         goto emulation_error;
7767
7768                 if (vmx->emulation_required && !vmx->rmode.vm86_active &&
7769                     vcpu->arch.exception.pending)
7770                         goto emulation_error;
7771
7772                 if (vcpu->arch.halt_request) {
7773                         vcpu->arch.halt_request = 0;
7774                         ret = kvm_vcpu_halt(vcpu);
7775                         goto out;
7776                 }
7777
7778                 if (signal_pending(current))
7779                         goto out;
7780                 if (need_resched())
7781                         schedule();
7782         }
7783
7784 out:
7785         return ret;
7786
7787 emulation_error:
7788         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7789         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7790         vcpu->run->internal.ndata = 0;
7791         return 0;
7792 }
7793
7794 static void grow_ple_window(struct kvm_vcpu *vcpu)
7795 {
7796         struct vcpu_vmx *vmx = to_vmx(vcpu);
7797         int old = vmx->ple_window;
7798
7799         vmx->ple_window = __grow_ple_window(old, ple_window,
7800                                             ple_window_grow,
7801                                             ple_window_max);
7802
7803         if (vmx->ple_window != old)
7804                 vmx->ple_window_dirty = true;
7805
7806         trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
7807 }
7808
7809 static void shrink_ple_window(struct kvm_vcpu *vcpu)
7810 {
7811         struct vcpu_vmx *vmx = to_vmx(vcpu);
7812         int old = vmx->ple_window;
7813
7814         vmx->ple_window = __shrink_ple_window(old, ple_window,
7815                                               ple_window_shrink,
7816                                               ple_window);
7817
7818         if (vmx->ple_window != old)
7819                 vmx->ple_window_dirty = true;
7820
7821         trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
7822 }
7823
7824 /*
7825  * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
7826  */
7827 static void wakeup_handler(void)
7828 {
7829         struct kvm_vcpu *vcpu;
7830         int cpu = smp_processor_id();
7831
7832         spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7833         list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
7834                         blocked_vcpu_list) {
7835                 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
7836
7837                 if (pi_test_on(pi_desc) == 1)
7838                         kvm_vcpu_kick(vcpu);
7839         }
7840         spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7841 }
7842
7843 static void vmx_enable_tdp(void)
7844 {
7845         kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
7846                 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
7847                 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
7848                 0ull, VMX_EPT_EXECUTABLE_MASK,
7849                 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
7850                 VMX_EPT_RWX_MASK, 0ull);
7851
7852         ept_set_mmio_spte_mask();
7853         kvm_enable_tdp();
7854 }
7855
7856 static __init int hardware_setup(void)
7857 {
7858         unsigned long host_bndcfgs;
7859         int r = -ENOMEM, i;
7860
7861         rdmsrl_safe(MSR_EFER, &host_efer);
7862
7863         for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
7864                 kvm_define_shared_msr(i, vmx_msr_index[i]);
7865
7866         for (i = 0; i < VMX_BITMAP_NR; i++) {
7867                 vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
7868                 if (!vmx_bitmap[i])
7869                         goto out;
7870         }
7871
7872         memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
7873         memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
7874
7875         if (setup_vmcs_config(&vmcs_config) < 0) {
7876                 r = -EIO;
7877                 goto out;
7878         }
7879
7880         if (boot_cpu_has(X86_FEATURE_NX))
7881                 kvm_enable_efer_bits(EFER_NX);
7882
7883         if (boot_cpu_has(X86_FEATURE_MPX)) {
7884                 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
7885                 WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
7886         }
7887
7888         if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
7889                 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
7890                 enable_vpid = 0;
7891
7892         if (!cpu_has_vmx_ept() ||
7893             !cpu_has_vmx_ept_4levels() ||
7894             !cpu_has_vmx_ept_mt_wb() ||
7895             !cpu_has_vmx_invept_global())
7896                 enable_ept = 0;
7897
7898         if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
7899                 enable_ept_ad_bits = 0;
7900
7901         if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
7902                 enable_unrestricted_guest = 0;
7903
7904         if (!cpu_has_vmx_flexpriority())
7905                 flexpriority_enabled = 0;
7906
7907         if (!cpu_has_virtual_nmis())
7908                 enable_vnmi = 0;
7909
7910         /*
7911          * set_apic_access_page_addr() is used to reload apic access
7912          * page upon invalidation.  No need to do anything if not
7913          * using the APIC_ACCESS_ADDR VMCS field.
7914          */
7915         if (!flexpriority_enabled)
7916                 kvm_x86_ops->set_apic_access_page_addr = NULL;
7917
7918         if (!cpu_has_vmx_tpr_shadow())
7919                 kvm_x86_ops->update_cr8_intercept = NULL;
7920
7921         if (enable_ept && !cpu_has_vmx_ept_2m_page())
7922                 kvm_disable_largepages();
7923
7924 #if IS_ENABLED(CONFIG_HYPERV)
7925         if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
7926             && enable_ept)
7927                 kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
7928 #endif
7929
7930         if (!cpu_has_vmx_ple()) {
7931                 ple_gap = 0;
7932                 ple_window = 0;
7933                 ple_window_grow = 0;
7934                 ple_window_max = 0;
7935                 ple_window_shrink = 0;
7936         }
7937
7938         if (!cpu_has_vmx_apicv()) {
7939                 enable_apicv = 0;
7940                 kvm_x86_ops->sync_pir_to_irr = NULL;
7941         }
7942
7943         if (cpu_has_vmx_tsc_scaling()) {
7944                 kvm_has_tsc_control = true;
7945                 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
7946                 kvm_tsc_scaling_ratio_frac_bits = 48;
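                     /*
                      * The VMX TSC multiplier is a fixed-point value with 48
                      * fractional bits, so 1ull << 48 corresponds to a scaling
                      * ratio of 1.0.
                      */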
7947         }
7948
7949         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
7950
7951         if (enable_ept)
7952                 vmx_enable_tdp();
7953         else
7954                 kvm_disable_tdp();
7955
7956         if (!nested) {
7957                 kvm_x86_ops->get_nested_state = NULL;
7958                 kvm_x86_ops->set_nested_state = NULL;
7959         }
7960
7961         /*
7962          * Only enable PML when the hardware supports the PML feature, and both EPT
7963          * and EPT A/D bit features are enabled -- PML depends on them to work.
7964          */
7965         if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
7966                 enable_pml = 0;
7967
7968         if (!enable_pml) {
7969                 kvm_x86_ops->slot_enable_log_dirty = NULL;
7970                 kvm_x86_ops->slot_disable_log_dirty = NULL;
7971                 kvm_x86_ops->flush_log_dirty = NULL;
7972                 kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
7973         }
7974
7975         if (!cpu_has_vmx_preemption_timer())
7976                 kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
7977
7978         if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
7979                 u64 vmx_msr;
7980
7981                 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
7982                 cpu_preemption_timer_multi =
7983                          vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
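                     /*
                      * Bits 4:0 of IA32_VMX_MISC give the preemption timer
                      * rate: the timer counts down by 1 each time bit N of the
                      * TSC changes, i.e. at the TSC frequency divided by 2^N.
                      */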
7984         } else {
7985                 kvm_x86_ops->set_hv_timer = NULL;
7986                 kvm_x86_ops->cancel_hv_timer = NULL;
7987         }
7988
7989         if (!cpu_has_vmx_shadow_vmcs())
7990                 enable_shadow_vmcs = 0;
7991         if (enable_shadow_vmcs)
7992                 init_vmcs_shadow_fields();
7993
7994         kvm_set_posted_intr_wakeup_handler(wakeup_handler);
7995         nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv);
7996
7997         kvm_mce_cap_supported |= MCG_LMCE_P;
7998
7999         return alloc_kvm_area();
8000
8001 out:
8002         for (i = 0; i < VMX_BITMAP_NR; i++)
8003                 free_page((unsigned long)vmx_bitmap[i]);
8004
8005         return r;
8006 }
8007
8008 static __exit void hardware_unsetup(void)
8009 {
8010         int i;
8011
8012         for (i = 0; i < VMX_BITMAP_NR; i++)
8013                 free_page((unsigned long)vmx_bitmap[i]);
8014
8015         free_kvm_area();
8016 }
8017
8018 /*
8019  * Indicate that this vcpu is busy-waiting on a spinlock. We do not enable
8020  * PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting (PLE).
8021  */
8022 static int handle_pause(struct kvm_vcpu *vcpu)
8023 {
8024         if (!kvm_pause_in_guest(vcpu->kvm))
8025                 grow_ple_window(vcpu);
8026
8027         /*
8028          * Intel SDM vol3 ch-25.1.3 says: the "PAUSE-loop exiting"
8029          * VM-execution control is ignored if CPL > 0. OTOH, KVM
8030          * never sets PAUSE_EXITING and only sets PLE if supported,
8031          * so the vcpu must be at CPL=0 if it gets a PAUSE exit.
8032          */
8033         kvm_vcpu_on_spin(vcpu, true);
8034         return kvm_skip_emulated_instruction(vcpu);
8035 }
8036
8037 static int handle_nop(struct kvm_vcpu *vcpu)
8038 {
8039         return kvm_skip_emulated_instruction(vcpu);
8040 }
8041
8042 static int handle_mwait(struct kvm_vcpu *vcpu)
8043 {
8044         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
8045         return handle_nop(vcpu);
8046 }
8047
8048 static int handle_invalid_op(struct kvm_vcpu *vcpu)
8049 {
8050         kvm_queue_exception(vcpu, UD_VECTOR);
8051         return 1;
8052 }
8053
8054 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
8055 {
8056         return 1;
8057 }
8058
8059 static int handle_monitor(struct kvm_vcpu *vcpu)
8060 {
8061         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
8062         return handle_nop(vcpu);
8063 }
8064
8065 /*
8066  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
8067  * set the success or error code of an emulated VMX instruction (as specified
8068  * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
8069  * instruction.
8070  */
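     /*
      * In the convention implemented below: VMsucceed clears all six status
      * flags, VMfailInvalid sets only CF, and VMfailValid sets only ZF and
      * records the error number in the current vmcs12's VM-instruction error
      * field.
      */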
8071 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
8072 {
8073         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
8074                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
8075                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
8076         return kvm_skip_emulated_instruction(vcpu);
8077 }
8078
8079 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
8080 {
8081         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
8082                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
8083                             X86_EFLAGS_SF | X86_EFLAGS_OF))
8084                         | X86_EFLAGS_CF);
8085         return kvm_skip_emulated_instruction(vcpu);
8086 }
8087
8088 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
8089                                 u32 vm_instruction_error)
8090 {
8091         /*
8092          * failValid writes the error number to the current VMCS, which
8093          * can't be done if there isn't a current VMCS.
8094          */
8095         if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
8096                 return nested_vmx_failInvalid(vcpu);
8097
8098         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
8099                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
8100                             X86_EFLAGS_SF | X86_EFLAGS_OF))
8101                         | X86_EFLAGS_ZF);
8102         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
8103         /*
8104          * We don't need to force a shadow sync because
8105          * VM_INSTRUCTION_ERROR is not shadowed
8106          */
8107         return kvm_skip_emulated_instruction(vcpu);
8108 }
8109
8110 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
8111 {
8112         /* TODO: do not simply reset the guest here. */
8113         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
8114         pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
8115 }
8116
8117 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
8118 {
8119         struct vcpu_vmx *vmx =
8120                 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
8121
8122         vmx->nested.preemption_timer_expired = true;
8123         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
8124         kvm_vcpu_kick(&vmx->vcpu);
8125
8126         return HRTIMER_NORESTART;
8127 }
8128
8129 /*
8130  * Decode the memory-address operand of a vmx instruction, as recorded on an
8131  * exit caused by such an instruction (run by a guest hypervisor).
8132  * On success, returns 0. When the operand is invalid, returns 1 and injects
8133  * #UD or #GP.
8134  */
8135 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
8136                                  unsigned long exit_qualification,
8137                                  u32 vmx_instruction_info, bool wr, gva_t *ret)
8138 {
8139         gva_t off;
8140         bool exn;
8141         struct kvm_segment s;
8142
8143         /*
8144          * According to Vol. 3B, "Information for VM Exits Due to Instruction
8145          * Execution", on an exit, vmx_instruction_info holds most of the
8146          * addressing components of the operand. Only the displacement part
8147          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
8148          * For how an actual address is calculated from all these components,
8149          * refer to Vol. 1, "Operand Addressing".
8150          */
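             /*
              * VM-exit instruction-information layout decoded below:
              *   bits 1:0   - scaling
              *   bits 9:7   - address size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit)
              *   bit  10    - set if the operand is a register, not memory
              *   bits 17:15 - segment register
              *   bits 21:18 - index register (bit 22 set if invalid)
              *   bits 26:23 - base register  (bit 27 set if invalid)
              */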
8151         int  scaling = vmx_instruction_info & 3;
8152         int  addr_size = (vmx_instruction_info >> 7) & 7;
8153         bool is_reg = vmx_instruction_info & (1u << 10);
8154         int  seg_reg = (vmx_instruction_info >> 15) & 7;
8155         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
8156         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
8157         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
8158         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
8159
8160         if (is_reg) {
8161                 kvm_queue_exception(vcpu, UD_VECTOR);
8162                 return 1;
8163         }
8164
8165         /* Addr = segment_base + offset */
8166         /* offset = base + [index * scale] + displacement */
8167         off = exit_qualification; /* holds the displacement */
8168         if (base_is_valid)
8169                 off += kvm_register_read(vcpu, base_reg);
8170         if (index_is_valid)
8171                 off += kvm_register_read(vcpu, index_reg) << scaling;
8172         vmx_get_segment(vcpu, &s, seg_reg);
8173         *ret = s.base + off;
8174
8175         if (addr_size == 1) /* 32 bit */
8176                 *ret &= 0xffffffff;
8177
8178         /* Checks for #GP/#SS exceptions. */
8179         exn = false;
8180         if (is_long_mode(vcpu)) {
8181                 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
8182                  * non-canonical form. This is the only check on the memory
8183                  * destination for long mode!
8184                  */
8185                 exn = is_noncanonical_address(*ret, vcpu);
8186         } else if (is_protmode(vcpu)) {
8187                 /* Protected mode: apply checks for segment validity in the
8188                  * following order:
8189                  * - segment type check (#GP(0) may be thrown)
8190                  * - usability check (#GP(0)/#SS(0))
8191                  * - limit check (#GP(0)/#SS(0))
8192                  */
8193                 if (wr)
8194                         /* #GP(0) if the destination operand is located in a
8195                          * read-only data segment or any code segment.
8196                          */
8197                         exn = ((s.type & 0xa) == 0 || (s.type & 8));
8198                 else
8199                         /* #GP(0) if the source operand is located in an
8200                          * execute-only code segment
8201                          */
8202                         exn = ((s.type & 0xa) == 8);
8203                 if (exn) {
8204                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
8205                         return 1;
8206                 }
8207                 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
8208                  */
8209                 exn = (s.unusable != 0);
8210                 /* Protected mode: #GP(0)/#SS(0) if the memory
8211                  * operand is outside the segment limit.
8212                  */
8213                 exn = exn || (off + sizeof(u64) > s.limit);
8214         }
8215         if (exn) {
8216                 kvm_queue_exception_e(vcpu,
8217                                       seg_reg == VCPU_SREG_SS ?
8218                                                 SS_VECTOR : GP_VECTOR,
8219                                       0);
8220                 return 1;
8221         }
8222
8223         return 0;
8224 }
8225
8226 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
8227 {
8228         gva_t gva;
8229         struct x86_exception e;
8230
8231         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
8232                         vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
8233                 return 1;
8234
8235         if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
8236                 kvm_inject_page_fault(vcpu, &e);
8237                 return 1;
8238         }
8239
8240         return 0;
8241 }
8242
8243 /*
8244  * Allocate a shadow VMCS and associate it with the currently loaded
8245  * VMCS, unless such a shadow VMCS already exists. The newly allocated
8246  * VMCS is also VMCLEARed, so that it is ready for use.
8247  */
8248 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
8249 {
8250         struct vcpu_vmx *vmx = to_vmx(vcpu);
8251         struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
8252
8253         /*
8254          * We should allocate a shadow vmcs for vmcs01 only when L1
8255          * executes VMXON and free it when L1 executes VMXOFF.
8256          * As it is invalid to execute VMXON twice, we shouldn't reach
8257          * here when vmcs01 already has an allocated shadow vmcs.
8258          */
8259         WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
8260
8261         if (!loaded_vmcs->shadow_vmcs) {
8262                 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
8263                 if (loaded_vmcs->shadow_vmcs)
8264                         vmcs_clear(loaded_vmcs->shadow_vmcs);
8265         }
8266         return loaded_vmcs->shadow_vmcs;
8267 }
8268
8269 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
8270 {
8271         struct vcpu_vmx *vmx = to_vmx(vcpu);
8272         int r;
8273
8274         r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
8275         if (r < 0)
8276                 goto out_vmcs02;
8277
8278         vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
8279         if (!vmx->nested.cached_vmcs12)
8280                 goto out_cached_vmcs12;
8281
8282         vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
8283         if (!vmx->nested.cached_shadow_vmcs12)
8284                 goto out_cached_shadow_vmcs12;
8285
8286         if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
8287                 goto out_shadow_vmcs;
8288
8289         hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
8290                      HRTIMER_MODE_REL_PINNED);
8291         vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
8292
8293         vmx->nested.vpid02 = allocate_vpid();
8294
8295         vmx->nested.vmcs02_initialized = false;
8296         vmx->nested.vmxon = true;
8297         return 0;
8298
8299 out_shadow_vmcs:
8300         kfree(vmx->nested.cached_shadow_vmcs12);
8301
8302 out_cached_shadow_vmcs12:
8303         kfree(vmx->nested.cached_vmcs12);
8304
8305 out_cached_vmcs12:
8306         free_loaded_vmcs(&vmx->nested.vmcs02);
8307
8308 out_vmcs02:
8309         return -ENOMEM;
8310 }
8311
8312 /*
8313  * Emulate the VMXON instruction.
8314  * Currently, we just remember that VMX is active, and do not save or even
8315  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
8316  * do not currently need to store anything in that guest-allocated memory
8317  * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
8318  * argument is different from the VMXON pointer (which the spec says they do).
8319  */
8320 static int handle_vmon(struct kvm_vcpu *vcpu)
8321 {
8322         int ret;
8323         gpa_t vmptr;
8324         struct page *page;
8325         struct vcpu_vmx *vmx = to_vmx(vcpu);
8326         const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
8327                 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
8328
8329         /*
8330          * The Intel VMX Instruction Reference lists a bunch of bits that are
8331          * prerequisite to running VMXON, most notably cr4.VMXE must be set to
8332          * 1 (see vmx_set_cr4() for when we allow the guest to set this).
8333          * Otherwise, we should fail with #UD.  But most faulting conditions
8334          * have already been checked by hardware, prior to the VM-exit for
8335          * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
8336          * that bit set to 1 in non-root mode.
8337          */
8338         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
8339                 kvm_queue_exception(vcpu, UD_VECTOR);
8340                 return 1;
8341         }
8342
8343         /* CPL=0 must be checked manually. */
8344         if (vmx_get_cpl(vcpu)) {
8345                 kvm_inject_gp(vcpu, 0);
8346                 return 1;
8347         }
8348
8349         if (vmx->nested.vmxon)
8350                 return nested_vmx_failValid(vcpu,
8351                         VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
8352
8353         if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
8354                         != VMXON_NEEDED_FEATURES) {
8355                 kvm_inject_gp(vcpu, 0);
8356                 return 1;
8357         }
8358
8359         if (nested_vmx_get_vmptr(vcpu, &vmptr))
8360                 return 1;
8361
8362         /*
8363          * SDM 3: 24.11.5
8364          * The first 4 bytes of VMXON region contain the supported
8365          * VMCS revision identifier
8366          *
8367          * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
8368          * that bit would limit the physical address width to 32 bits.
8369          */
8370         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
8371                 return nested_vmx_failInvalid(vcpu);
8372
8373         page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
8374         if (is_error_page(page))
8375                 return nested_vmx_failInvalid(vcpu);
8376
8377         if (*(u32 *)kmap(page) != VMCS12_REVISION) {
8378                 kunmap(page);
8379                 kvm_release_page_clean(page);
8380                 return nested_vmx_failInvalid(vcpu);
8381         }
8382         kunmap(page);
8383         kvm_release_page_clean(page);
8384
8385         vmx->nested.vmxon_ptr = vmptr;
8386         ret = enter_vmx_operation(vcpu);
8387         if (ret)
8388                 return ret;
8389
8390         return nested_vmx_succeed(vcpu);
8391 }
8392
8393 /*
8394  * Intel's VMX Instruction Reference specifies a common set of prerequisites
8395  * for running VMX instructions (except VMXON, whose prerequisites are
8396  * slightly different). It also specifies what exception to inject otherwise.
8397  * Note that many of these exceptions have priority over VM exits, so they
8398  * don't have to be checked again here.
8399  */
8400 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
8401 {
8402         if (!to_vmx(vcpu)->nested.vmxon) {
8403                 kvm_queue_exception(vcpu, UD_VECTOR);
8404                 return 0;
8405         }
8406
8407         if (vmx_get_cpl(vcpu)) {
8408                 kvm_inject_gp(vcpu, 0);
8409                 return 0;
8410         }
8411
8412         return 1;
8413 }
8414
8415 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
8416 {
8417         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
8418         vmcs_write64(VMCS_LINK_POINTER, -1ull);
8419 }
8420
8421 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
8422 {
8423         if (vmx->nested.current_vmptr == -1ull)
8424                 return;
8425
8426         if (enable_shadow_vmcs) {
8427                 /* copy to memory all shadowed fields in case
8428                    they were modified */
8429                 copy_shadow_to_vmcs12(vmx);
8430                 vmx->nested.sync_shadow_vmcs = false;
8431                 vmx_disable_shadow_vmcs(vmx);
8432         }
8433         vmx->nested.posted_intr_nv = -1;
8434
8435         /* Flush VMCS12 to guest memory */
8436         kvm_vcpu_write_guest_page(&vmx->vcpu,
8437                                   vmx->nested.current_vmptr >> PAGE_SHIFT,
8438                                   vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
8439
8440         vmx->nested.current_vmptr = -1ull;
8441 }
8442
8443 /*
8444  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
8445  * just stops using VMX.
8446  */
8447 static void free_nested(struct vcpu_vmx *vmx)
8448 {
8449         if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
8450                 return;
8451
8452         vmx->nested.vmxon = false;
8453         vmx->nested.smm.vmxon = false;
8454         free_vpid(vmx->nested.vpid02);
8455         vmx->nested.posted_intr_nv = -1;
8456         vmx->nested.current_vmptr = -1ull;
8457         if (enable_shadow_vmcs) {
8458                 vmx_disable_shadow_vmcs(vmx);
8459                 vmcs_clear(vmx->vmcs01.shadow_vmcs);
8460                 free_vmcs(vmx->vmcs01.shadow_vmcs);
8461                 vmx->vmcs01.shadow_vmcs = NULL;
8462         }
8463         kfree(vmx->nested.cached_vmcs12);
8464         kfree(vmx->nested.cached_shadow_vmcs12);
8465         /* Unpin physical memory we referred to in the vmcs02 */
8466         if (vmx->nested.apic_access_page) {
8467                 kvm_release_page_dirty(vmx->nested.apic_access_page);
8468                 vmx->nested.apic_access_page = NULL;
8469         }
8470         if (vmx->nested.virtual_apic_page) {
8471                 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
8472                 vmx->nested.virtual_apic_page = NULL;
8473         }
8474         if (vmx->nested.pi_desc_page) {
8475                 kunmap(vmx->nested.pi_desc_page);
8476                 kvm_release_page_dirty(vmx->nested.pi_desc_page);
8477                 vmx->nested.pi_desc_page = NULL;
8478                 vmx->nested.pi_desc = NULL;
8479         }
8480
8481         free_loaded_vmcs(&vmx->nested.vmcs02);
8482 }
8483
8484 /* Emulate the VMXOFF instruction */
8485 static int handle_vmoff(struct kvm_vcpu *vcpu)
8486 {
8487         if (!nested_vmx_check_permission(vcpu))
8488                 return 1;
8489         free_nested(to_vmx(vcpu));
8490         return nested_vmx_succeed(vcpu);
8491 }
8492
8493 /* Emulate the VMCLEAR instruction */
8494 static int handle_vmclear(struct kvm_vcpu *vcpu)
8495 {
8496         struct vcpu_vmx *vmx = to_vmx(vcpu);
8497         u32 zero = 0;
8498         gpa_t vmptr;
8499
8500         if (!nested_vmx_check_permission(vcpu))
8501                 return 1;
8502
8503         if (nested_vmx_get_vmptr(vcpu, &vmptr))
8504                 return 1;
8505
8506         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
8507                 return nested_vmx_failValid(vcpu,
8508                         VMXERR_VMCLEAR_INVALID_ADDRESS);
8509
8510         if (vmptr == vmx->nested.vmxon_ptr)
8511                 return nested_vmx_failValid(vcpu,
8512                         VMXERR_VMCLEAR_VMXON_POINTER);
8513
8514         if (vmptr == vmx->nested.current_vmptr)
8515                 nested_release_vmcs12(vmx);
8516
8517         kvm_vcpu_write_guest(vcpu,
8518                         vmptr + offsetof(struct vmcs12, launch_state),
8519                         &zero, sizeof(zero));
8520
8521         return nested_vmx_succeed(vcpu);
8522 }
8523
8524 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
8525
8526 /* Emulate the VMLAUNCH instruction */
8527 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
8528 {
8529         return nested_vmx_run(vcpu, true);
8530 }
8531
8532 /* Emulate the VMRESUME instruction */
8533 static int handle_vmresume(struct kvm_vcpu *vcpu)
8534 {
8535
8536         return nested_vmx_run(vcpu, false);
8537 }
8538
8539 /*
8540  * Read a vmcs12 field. Since these can have varying lengths and we return
8541  * one type, we chose the biggest type (u64) and zero-extend the return value
8542  * to that size. Note that the caller, handle_vmread, might need to use only
8543  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
8544  * 64-bit fields are to be returned).
8545  */
8546 static inline int vmcs12_read_any(struct vmcs12 *vmcs12,
8547                                   unsigned long field, u64 *ret)
8548 {
8549         short offset = vmcs_field_to_offset(field);
8550         char *p;
8551
8552         if (offset < 0)
8553                 return offset;
8554
8555         p = (char *)vmcs12 + offset;
8556
8557         switch (vmcs_field_width(field)) {
8558         case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
8559                 *ret = *((natural_width *)p);
8560                 return 0;
8561         case VMCS_FIELD_WIDTH_U16:
8562                 *ret = *((u16 *)p);
8563                 return 0;
8564         case VMCS_FIELD_WIDTH_U32:
8565                 *ret = *((u32 *)p);
8566                 return 0;
8567         case VMCS_FIELD_WIDTH_U64:
8568                 *ret = *((u64 *)p);
8569                 return 0;
8570         default:
8571                 WARN_ON(1);
8572                 return -ENOENT;
8573         }
8574 }
8575
8576
8577 static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
8578                                    unsigned long field, u64 field_value){
8579         short offset = vmcs_field_to_offset(field);
8580         char *p = (char *)vmcs12 + offset;
8581         if (offset < 0)
8582                 return offset;
8583
8584         switch (vmcs_field_width(field)) {
8585         case VMCS_FIELD_WIDTH_U16:
8586                 *(u16 *)p = field_value;
8587                 return 0;
8588         case VMCS_FIELD_WIDTH_U32:
8589                 *(u32 *)p = field_value;
8590                 return 0;
8591         case VMCS_FIELD_WIDTH_U64:
8592                 *(u64 *)p = field_value;
8593                 return 0;
8594         case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
8595                 *(natural_width *)p = field_value;
8596                 return 0;
8597         default:
8598                 WARN_ON(1);
8599                 return -ENOENT;
8600         }
8601
8602 }
8603
8604 /*
8605  * Copy the writable VMCS shadow fields back to the VMCS12, in case
8606  * they have been modified by the L1 guest. Note that the "read-only"
8607  * VM-exit information fields are actually writable if the vCPU is
8608  * configured to support "VMWRITE to any supported field in the VMCS."
8609  */
8610 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
8611 {
8612         const u16 *fields[] = {
8613                 shadow_read_write_fields,
8614                 shadow_read_only_fields
8615         };
8616         const int max_fields[] = {
8617                 max_shadow_read_write_fields,
8618                 max_shadow_read_only_fields
8619         };
8620         int i, q;
8621         unsigned long field;
8622         u64 field_value;
8623         struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
8624
8625         preempt_disable();
8626
8627         vmcs_load(shadow_vmcs);
8628
8629         for (q = 0; q < ARRAY_SIZE(fields); q++) {
8630                 for (i = 0; i < max_fields[q]; i++) {
8631                         field = fields[q][i];
8632                         field_value = __vmcs_readl(field);
8633                         vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
8634                 }
8635                 /*
8636                  * Skip the VM-exit information fields if they are read-only.
8637                  */
8638                 if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
8639                         break;
8640         }
8641
8642         vmcs_clear(shadow_vmcs);
8643         vmcs_load(vmx->loaded_vmcs->vmcs);
8644
8645         preempt_enable();
8646 }
8647
8648 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
8649 {
8650         const u16 *fields[] = {
8651                 shadow_read_write_fields,
8652                 shadow_read_only_fields
8653         };
8654         const int max_fields[] = {
8655                 max_shadow_read_write_fields,
8656                 max_shadow_read_only_fields
8657         };
8658         int i, q;
8659         unsigned long field;
8660         u64 field_value = 0;
8661         struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
8662
8663         vmcs_load(shadow_vmcs);
8664
8665         for (q = 0; q < ARRAY_SIZE(fields); q++) {
8666                 for (i = 0; i < max_fields[q]; i++) {
8667                         field = fields[q][i];
8668                         vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
8669                         __vmcs_writel(field, field_value);
8670                 }
8671         }
8672
8673         vmcs_clear(shadow_vmcs);
8674         vmcs_load(vmx->loaded_vmcs->vmcs);
8675 }
8676
8677 static int handle_vmread(struct kvm_vcpu *vcpu)
8678 {
8679         unsigned long field;
8680         u64 field_value;
8681         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8682         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8683         gva_t gva = 0;
8684         struct vmcs12 *vmcs12;
8685
8686         if (!nested_vmx_check_permission(vcpu))
8687                 return 1;
8688
8689         if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
8690                 return nested_vmx_failInvalid(vcpu);
8691
8692         if (!is_guest_mode(vcpu))
8693                 vmcs12 = get_vmcs12(vcpu);
8694         else {
8695                 /*
8696                  * When vmcs12->vmcs_link_pointer is -1ull, any VMREAD of a
8697                  * shadowed field sets the ALU flags to signal VMfailInvalid.
8698                  */
8699                 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
8700                         return nested_vmx_failInvalid(vcpu);
8701                 vmcs12 = get_shadow_vmcs12(vcpu);
8702         }
8703
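        /*
         * VM-exit instruction-information layout for VMREAD (per the SDM):
         * bits 31:28 name the register holding the VMCS field encoding,
         * bit 10 selects a register (1) or memory (0) destination, and
         * bits 6:3 name the destination register when bit 10 is set.
         */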
8704         /* Decode instruction info and find the field to read */
8705         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
8706         /* Read the field, zero-extended to a u64 field_value */
8707         if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
8708                 return nested_vmx_failValid(vcpu,
8709                         VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8710
8711         /*
8712          * Now copy part of this value to a register or to memory, as requested.
8713          * Note that the number of bits actually copied is 32 or 64 depending
8714          * on the guest's mode (32-bit or 64-bit), not on the given field's length.
8715          */
8716         if (vmx_instruction_info & (1u << 10)) {
8717                 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
8718                         field_value);
8719         } else {
8720                 if (get_vmx_mem_address(vcpu, exit_qualification,
8721                                 vmx_instruction_info, true, &gva))
8722                         return 1;
8723                 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
8724                 kvm_write_guest_virt_system(vcpu, gva, &field_value,
8725                                             (is_long_mode(vcpu) ? 8 : 4), NULL);
8726         }
8727
8728         return nested_vmx_succeed(vcpu);
8729 }
8730
8731
8732 static int handle_vmwrite(struct kvm_vcpu *vcpu)
8733 {
8734         unsigned long field;
8735         gva_t gva;
8736         struct vcpu_vmx *vmx = to_vmx(vcpu);
8737         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8738         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8739
8740         /* The value to write might be 32 or 64 bits, depending on L1's long
8741          * mode, and eventually we need to write that into a field of several
8742          * possible lengths. The code below first zero-extends the value to 64
8743          * bit (field_value), and then copies only the appropriate number of
8744          * bits (field_value), and then copies only the appropriate number of
8745          */
8746         u64 field_value = 0;
8747         struct x86_exception e;
8748         struct vmcs12 *vmcs12;
8749
8750         if (!nested_vmx_check_permission(vcpu))
8751                 return 1;
8752
8753         if (vmx->nested.current_vmptr == -1ull)
8754                 return nested_vmx_failInvalid(vcpu);
8755
8756         if (vmx_instruction_info & (1u << 10))
8757                 field_value = kvm_register_readl(vcpu,
8758                         (((vmx_instruction_info) >> 3) & 0xf));
8759         else {
8760                 if (get_vmx_mem_address(vcpu, exit_qualification,
8761                                 vmx_instruction_info, false, &gva))
8762                         return 1;
8763                 if (kvm_read_guest_virt(vcpu, gva, &field_value,
8764                                         (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
8765                         kvm_inject_page_fault(vcpu, &e);
8766                         return 1;
8767                 }
8768         }
8769
8770
8771         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
8772         /*
8773          * If the vCPU supports "VMWRITE to any supported field in the
8774          * VMCS," then the "read-only" fields are actually read/write.
8775          */
8776         if (vmcs_field_readonly(field) &&
8777             !nested_cpu_has_vmwrite_any_field(vcpu))
8778                 return nested_vmx_failValid(vcpu,
8779                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
8780
8781         if (!is_guest_mode(vcpu))
8782                 vmcs12 = get_vmcs12(vcpu);
8783         else {
8784                 /*
8785                  * When vmcs12->vmcs_link_pointer is -1ull, any VMWRITE to a
8786                  * shadowed field sets the ALU flags to signal VMfailInvalid.
8787                  */
8788                 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
8789                         return nested_vmx_failInvalid(vcpu);
8790                 vmcs12 = get_shadow_vmcs12(vcpu);
8791         }
8792
8793         if (vmcs12_write_any(vmcs12, field, field_value) < 0)
8794                 return nested_vmx_failValid(vcpu,
8795                         VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8796
8797         /*
8798          * Do not track vmcs12 dirty-state if in guest-mode
8799          * as we actually dirty shadow vmcs12 instead of vmcs12.
8800          */
8801         if (!is_guest_mode(vcpu)) {
8802                 switch (field) {
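                /*
                 * The include below is an X-macro: with SHADOW_FIELD_RW()
                 * defined as a "case" label it expands to one label per
                 * read/write shadow VMCS field, so writes to those fields
                 * break out here and do not mark vmcs12 dirty in the
                 * default branch.
                 */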
8803 #define SHADOW_FIELD_RW(x) case x:
8804 #include "vmx_shadow_fields.h"
8805                         /*
8806                          * The fields that can be updated by L1 without a vmexit are
8807                          * always updated in the vmcs02, the others go down the slow
8808                          * path of prepare_vmcs02.
8809                          */
8810                         break;
8811                 default:
8812                         vmx->nested.dirty_vmcs12 = true;
8813                         break;
8814                 }
8815         }
8816
8817         return nested_vmx_succeed(vcpu);
8818 }
8819
8820 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
8821 {
8822         vmx->nested.current_vmptr = vmptr;
8823         if (enable_shadow_vmcs) {
8824                 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
8825                               SECONDARY_EXEC_SHADOW_VMCS);
8826                 vmcs_write64(VMCS_LINK_POINTER,
8827                              __pa(vmx->vmcs01.shadow_vmcs));
8828                 vmx->nested.sync_shadow_vmcs = true;
8829         }
8830         vmx->nested.dirty_vmcs12 = true;
8831 }
8832
8833 /* Emulate the VMPTRLD instruction */
8834 static int handle_vmptrld(struct kvm_vcpu *vcpu)
8835 {
8836         struct vcpu_vmx *vmx = to_vmx(vcpu);
8837         gpa_t vmptr;
8838
8839         if (!nested_vmx_check_permission(vcpu))
8840                 return 1;
8841
8842         if (nested_vmx_get_vmptr(vcpu, &vmptr))
8843                 return 1;
8844
8845         if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
8846                 return nested_vmx_failValid(vcpu,
8847                         VMXERR_VMPTRLD_INVALID_ADDRESS);
8848
8849         if (vmptr == vmx->nested.vmxon_ptr)
8850                 return nested_vmx_failValid(vcpu,
8851                         VMXERR_VMPTRLD_VMXON_POINTER);
8852
8853         if (vmx->nested.current_vmptr != vmptr) {
8854                 struct vmcs12 *new_vmcs12;
8855                 struct page *page;
8856                 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
8857                 if (is_error_page(page))
8858                         return nested_vmx_failInvalid(vcpu);
8859
8860                 new_vmcs12 = kmap(page);
8861                 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
8862                     (new_vmcs12->hdr.shadow_vmcs &&
8863                      !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
8864                         kunmap(page);
8865                         kvm_release_page_clean(page);
8866                         return nested_vmx_failValid(vcpu,
8867                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
8868                 }
8869
8870                 nested_release_vmcs12(vmx);
8871                 /*
8872                  * Load VMCS12 from guest memory since it is not already
8873                  * cached.
8874                  */
8875                 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
8876                 kunmap(page);
8877                 kvm_release_page_clean(page);
8878
8879                 set_current_vmptr(vmx, vmptr);
8880         }
8881
8882         return nested_vmx_succeed(vcpu);
8883 }
8884
8885 /* Emulate the VMPTRST instruction */
8886 static int handle_vmptrst(struct kvm_vcpu *vcpu)
8887 {
8888         unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
8889         u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8890         gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
8891         struct x86_exception e;
8892         gva_t gva;
8893
8894         if (!nested_vmx_check_permission(vcpu))
8895                 return 1;
8896
8897         if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
8898                 return 1;
8899         /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
8900         if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
8901                                         sizeof(gpa_t), &e)) {
8902                 kvm_inject_page_fault(vcpu, &e);
8903                 return 1;
8904         }
8905         return nested_vmx_succeed(vcpu);
8906 }
8907
8908 /* Emulate the INVEPT instruction */
8909 static int handle_invept(struct kvm_vcpu *vcpu)
8910 {
8911         struct vcpu_vmx *vmx = to_vmx(vcpu);
8912         u32 vmx_instruction_info, types;
8913         unsigned long type;
8914         gva_t gva;
8915         struct x86_exception e;
8916         struct {
8917                 u64 eptp, gpa;
8918         } operand;
8919
8920         if (!(vmx->nested.msrs.secondary_ctls_high &
8921               SECONDARY_EXEC_ENABLE_EPT) ||
8922             !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
8923                 kvm_queue_exception(vcpu, UD_VECTOR);
8924                 return 1;
8925         }
8926
8927         if (!nested_vmx_check_permission(vcpu))
8928                 return 1;
8929
8930         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8931         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
8932
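        /*
         * After shifting by VMX_EPT_EXTENT_SHIFT, bit 1 advertises
         * single-context INVEPT and bit 2 global (all-context) INVEPT,
         * matching INVEPT types 1 and 2; hence the mask of 6 and the
         * (1 << type) check below.
         */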
8933         types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
8934
8935         if (type >= 32 || !(types & (1 << type)))
8936                 return nested_vmx_failValid(vcpu,
8937                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
8938
8939         /* According to the Intel VMX instruction reference, the memory
8940          * operand is read even if it isn't needed (e.g., for type==global)
8941          */
8942         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
8943                         vmx_instruction_info, false, &gva))
8944                 return 1;
8945         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
8946                 kvm_inject_page_fault(vcpu, &e);
8947                 return 1;
8948         }
8949
8950         switch (type) {
8951         case VMX_EPT_EXTENT_GLOBAL:
8952         /*
8953          * TODO: track mappings and invalidate
8954          * single context requests appropriately
8955          */
8956         case VMX_EPT_EXTENT_CONTEXT:
8957                 kvm_mmu_sync_roots(vcpu);
8958                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
8959                 break;
8960         default:
8961                 BUG_ON(1);
8962                 break;
8963         }
8964
8965         return nested_vmx_succeed(vcpu);
8966 }
8967
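/*
 * Return the VPID to use when flushing translations for L2: the dedicated
 * vpid02 if one was allocated for the nested guest, otherwise L1's own vpid.
 */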
8968 static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
8969 {
8970         struct vcpu_vmx *vmx = to_vmx(vcpu);
8971
8972         return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
8973 }
8974
8975 static int handle_invvpid(struct kvm_vcpu *vcpu)
8976 {
8977         struct vcpu_vmx *vmx = to_vmx(vcpu);
8978         u32 vmx_instruction_info;
8979         unsigned long type, types;
8980         gva_t gva;
8981         struct x86_exception e;
8982         struct {
8983                 u64 vpid;
8984                 u64 gla;
8985         } operand;
8986         u16 vpid02;
8987
8988         if (!(vmx->nested.msrs.secondary_ctls_high &
8989               SECONDARY_EXEC_ENABLE_VPID) ||
8990                         !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
8991                 kvm_queue_exception(vcpu, UD_VECTOR);
8992                 return 1;
8993         }
8994
8995         if (!nested_vmx_check_permission(vcpu))
8996                 return 1;
8997
8998         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8999         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
9000
9001         types = (vmx->nested.msrs.vpid_caps &
9002                         VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
9003
9004         if (type >= 32 || !(types & (1 << type)))
9005                 return nested_vmx_failValid(vcpu,
9006                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9007
9008         /* According to the Intel VMX instruction reference, the memory
9009          * operand is read even if it isn't needed (e.g., for type==global)
9010          */
9011         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
9012                         vmx_instruction_info, false, &gva))
9013                 return 1;
9014         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
9015                 kvm_inject_page_fault(vcpu, &e);
9016                 return 1;
9017         }
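        /*
         * The VPID field of the INVVPID descriptor is only 16 bits wide;
         * any bits set above bit 15 make the descriptor invalid.
         */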
9018         if (operand.vpid >> 16)
9019                 return nested_vmx_failValid(vcpu,
9020                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9021
9022         vpid02 = nested_get_vpid02(vcpu);
9023         switch (type) {
9024         case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
9025                 if (!operand.vpid ||
9026                     is_noncanonical_address(operand.gla, vcpu))
9027                         return nested_vmx_failValid(vcpu,
9028                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9029                 if (cpu_has_vmx_invvpid_individual_addr()) {
9030                         __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
9031                                 vpid02, operand.gla);
9032                 } else
9033                         __vmx_flush_tlb(vcpu, vpid02, false);
9034                 break;
9035         case VMX_VPID_EXTENT_SINGLE_CONTEXT:
9036         case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
9037                 if (!operand.vpid)
9038                         return nested_vmx_failValid(vcpu,
9039                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
9040                 __vmx_flush_tlb(vcpu, vpid02, false);
9041                 break;
9042         case VMX_VPID_EXTENT_ALL_CONTEXT:
9043                 __vmx_flush_tlb(vcpu, vpid02, false);
9044                 break;
9045         default:
9046                 WARN_ON_ONCE(1);
9047                 return kvm_skip_emulated_instruction(vcpu);
9048         }
9049
9050         return nested_vmx_succeed(vcpu);
9051 }
9052
9053 static int handle_invpcid(struct kvm_vcpu *vcpu)
9054 {
9055         u32 vmx_instruction_info;
9056         unsigned long type;
9057         bool pcid_enabled;
9058         gva_t gva;
9059         struct x86_exception e;
9060         unsigned i;
9061         unsigned long roots_to_free = 0;
9062         struct {
9063                 u64 pcid;
9064                 u64 gla;
9065         } operand;
9066
9067         if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
9068                 kvm_queue_exception(vcpu, UD_VECTOR);
9069                 return 1;
9070         }
9071
9072         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
9073         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
9074
9075         if (type > 3) {
9076                 kvm_inject_gp(vcpu, 0);
9077                 return 1;
9078         }
9079
9080         /* According to the Intel instruction reference, the memory operand
9081          * is read even if it isn't needed (e.g., for type==all)
9082          */
9083         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
9084                                 vmx_instruction_info, false, &gva))
9085                 return 1;
9086
9087         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
9088                 kvm_inject_page_fault(vcpu, &e);
9089                 return 1;
9090         }
9091
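        /*
         * The PCID field of the INVPCID descriptor is only 12 bits wide;
         * bits 63:12 are reserved and must be zero, otherwise INVPCID #GPs.
         */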
9092         if (operand.pcid >> 12 != 0) {
9093                 kvm_inject_gp(vcpu, 0);
9094                 return 1;
9095         }
9096
9097         pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
9098
9099         switch (type) {
9100         case INVPCID_TYPE_INDIV_ADDR:
9101                 if ((!pcid_enabled && (operand.pcid != 0)) ||
9102                     is_noncanonical_address(operand.gla, vcpu)) {
9103                         kvm_inject_gp(vcpu, 0);
9104                         return 1;
9105                 }
9106                 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
9107                 return kvm_skip_emulated_instruction(vcpu);
9108
9109         case INVPCID_TYPE_SINGLE_CTXT:
9110                 if (!pcid_enabled && (operand.pcid != 0)) {
9111                         kvm_inject_gp(vcpu, 0);
9112                         return 1;
9113                 }
9114
9115                 if (kvm_get_active_pcid(vcpu) == operand.pcid) {
9116                         kvm_mmu_sync_roots(vcpu);
9117                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
9118                 }
9119
9120                 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
9121                         if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
9122                             == operand.pcid)
9123                                 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
9124
9125                 kvm_mmu_free_roots(vcpu, roots_to_free);
9126                 /*
9127                  * If neither the current cr3 nor any of the prev_roots use the
9128                  * given PCID, then nothing needs to be done here because a
9129                  * resync will happen anyway before switching to any other CR3.
9130                  */
9131
9132                 return kvm_skip_emulated_instruction(vcpu);
9133
9134         case INVPCID_TYPE_ALL_NON_GLOBAL:
9135                 /*
9136                  * Currently, KVM doesn't mark global entries in the shadow
9137                  * page tables, so a non-global flush just degenerates to a
9138                  * global flush. If needed, we could optimize this later by
9139                  * keeping track of global entries in shadow page tables.
9140                  */
9141
9142                 /* fall-through */
9143         case INVPCID_TYPE_ALL_INCL_GLOBAL:
9144                 kvm_mmu_unload(vcpu);
9145                 return kvm_skip_emulated_instruction(vcpu);
9146
9147         default:
9148                 BUG(); /* We have already checked above that type <= 3 */
9149         }
9150 }
9151
9152 static int handle_pml_full(struct kvm_vcpu *vcpu)
9153 {
9154         unsigned long exit_qualification;
9155
9156         trace_kvm_pml_full(vcpu->vcpu_id);
9157
9158         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
9159
9160         /*
9161          * If the PML-buffer-full exit happened while executing IRET from an
9162          * NMI, the "blocked by NMI" bit has to be set before the next VM entry.
9163          */
9164         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
9165                         enable_vnmi &&
9166                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
9167                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
9168                                 GUEST_INTR_STATE_NMI);
9169
9170         /*
9171          * The PML buffer was already flushed at the beginning of the VMEXIT.
9172          * Nothing to do here, and there's no userspace involvement needed for PML.
9173          */
9174         return 1;
9175 }
9176
9177 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
9178 {
9179         if (!to_vmx(vcpu)->req_immediate_exit)
9180                 kvm_lapic_expired_hv_timer(vcpu);
9181         return 1;
9182 }
9183
9184 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
9185 {
9186         struct vcpu_vmx *vmx = to_vmx(vcpu);
9187         int maxphyaddr = cpuid_maxphyaddr(vcpu);
9188
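        /*
         * EPTP layout: bits 2:0 select the memory type, bits 5:3 hold the
         * page-walk length minus one, bit 6 enables accessed/dirty flags,
         * bits 11:7 are reserved, and the upper bits hold the physical
         * address of the EPT PML4 table.
         */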
9189         /* Check for memory type validity */
9190         switch (address & VMX_EPTP_MT_MASK) {
9191         case VMX_EPTP_MT_UC:
9192                 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
9193                         return false;
9194                 break;
9195         case VMX_EPTP_MT_WB:
9196                 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
9197                         return false;
9198                 break;
9199         default:
9200                 return false;
9201         }
9202
9203         /* Only a page-walk length of 4 is valid */
9204         if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
9205                 return false;
9206
9207         /* Reserved bits should not be set */
9208         if (address >> maxphyaddr || ((address >> 7) & 0x1f))
9209                 return false;
9210
9211         /* The A/D-enable bit may only be set if A/D flags are supported */
9212         if (address & VMX_EPTP_AD_ENABLE_BIT) {
9213                 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
9214                         return false;
9215         }
9216
9217         return true;
9218 }
9219
9220 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
9221                                      struct vmcs12 *vmcs12)
9222 {
9223         u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
9224         u64 address;
9225         bool accessed_dirty;
9226         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
9227
9228         if (!nested_cpu_has_eptp_switching(vmcs12) ||
9229             !nested_cpu_has_ept(vmcs12))
9230                 return 1;
9231
9232         if (index >= VMFUNC_EPTP_ENTRIES)
9233                 return 1;
9234
9235
9236         if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
9237                                      &address, index * 8, 8))
9238                 return 1;
9239
9240         accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
9241
9242         /*
9243          * If the (L2) guest does a vmfunc to the currently
9244          * active EPT pointer, we don't have to do anything else.
9245          */
9246         if (vmcs12->ept_pointer != address) {
9247                 if (!valid_ept_address(vcpu, address))
9248                         return 1;
9249
9250                 kvm_mmu_unload(vcpu);
9251                 mmu->ept_ad = accessed_dirty;
9252                 mmu->base_role.ad_disabled = !accessed_dirty;
9253                 vmcs12->ept_pointer = address;
9254                 /*
9255                  * TODO: Decide on the correct approach in case the
9256                  * MMU reload fails. Currently, we just let the next
9257                  * reload potentially fail.
9258                  */
9259                 kvm_mmu_reload(vcpu);
9260         }
9261
9262         return 0;
9263 }
9264
9265 static int handle_vmfunc(struct kvm_vcpu *vcpu)
9266 {
9267         struct vcpu_vmx *vmx = to_vmx(vcpu);
9268         struct vmcs12 *vmcs12;
9269         u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
9270
9271         /*
9272          * VMFUNC is only supported for nested guests, but we always enable the
9273          * secondary control for simplicity; in non-nested mode, fake that the
9274          * control is not enabled by injecting #UD.
9275          */
9276         if (!is_guest_mode(vcpu)) {
9277                 kvm_queue_exception(vcpu, UD_VECTOR);
9278                 return 1;
9279         }
9280
9281         vmcs12 = get_vmcs12(vcpu);
9282         if ((vmcs12->vm_function_control & (1 << function)) == 0)
9283                 goto fail;
9284
9285         switch (function) {
9286         case 0:
9287                 if (nested_vmx_eptp_switching(vcpu, vmcs12))
9288                         goto fail;
9289                 break;
9290         default:
9291                 goto fail;
9292         }
9293         return kvm_skip_emulated_instruction(vcpu);
9294
9295 fail:
9296         nested_vmx_vmexit(vcpu, vmx->exit_reason,
9297                           vmcs_read32(VM_EXIT_INTR_INFO),
9298                           vmcs_readl(EXIT_QUALIFICATION));
9299         return 1;
9300 }
9301
9302 static int handle_encls(struct kvm_vcpu *vcpu)
9303 {
9304         /*
9305          * SGX virtualization is not yet supported.  There is no software
9306          * enable bit for SGX, so we have to trap ENCLS and inject a #UD
9307          * to prevent the guest from executing ENCLS.
9308          */
9309         kvm_queue_exception(vcpu, UD_VECTOR);
9310         return 1;
9311 }
9312
9313 /*
9314  * The exit handlers return 1 if the exit was handled fully and guest execution
9315  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
9316  * to be done to userspace and return 0.
9317  */
9318 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
9319         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
9320         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
9321         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
9322         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
9323         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
9324         [EXIT_REASON_CR_ACCESS]               = handle_cr,
9325         [EXIT_REASON_DR_ACCESS]               = handle_dr,
9326         [EXIT_REASON_CPUID]                   = handle_cpuid,
9327         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
9328         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
9329         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
9330         [EXIT_REASON_HLT]                     = handle_halt,
9331         [EXIT_REASON_INVD]                    = handle_invd,
9332         [EXIT_REASON_INVLPG]                  = handle_invlpg,
9333         [EXIT_REASON_RDPMC]                   = handle_rdpmc,
9334         [EXIT_REASON_VMCALL]                  = handle_vmcall,
9335         [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
9336         [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
9337         [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
9338         [EXIT_REASON_VMPTRST]                 = handle_vmptrst,
9339         [EXIT_REASON_VMREAD]                  = handle_vmread,
9340         [EXIT_REASON_VMRESUME]                = handle_vmresume,
9341         [EXIT_REASON_VMWRITE]                 = handle_vmwrite,
9342         [EXIT_REASON_VMOFF]                   = handle_vmoff,
9343         [EXIT_REASON_VMON]                    = handle_vmon,
9344         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
9345         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
9346         [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
9347         [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
9348         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
9349         [EXIT_REASON_XSETBV]                  = handle_xsetbv,
9350         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
9351         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
9352         [EXIT_REASON_GDTR_IDTR]               = handle_desc,
9353         [EXIT_REASON_LDTR_TR]                 = handle_desc,
9354         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
9355         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
9356         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
9357         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_mwait,
9358         [EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
9359         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
9360         [EXIT_REASON_INVEPT]                  = handle_invept,
9361         [EXIT_REASON_INVVPID]                 = handle_invvpid,
9362         [EXIT_REASON_RDRAND]                  = handle_invalid_op,
9363         [EXIT_REASON_RDSEED]                  = handle_invalid_op,
9364         [EXIT_REASON_XSAVES]                  = handle_xsaves,
9365         [EXIT_REASON_XRSTORS]                 = handle_xrstors,
9366         [EXIT_REASON_PML_FULL]                = handle_pml_full,
9367         [EXIT_REASON_INVPCID]                 = handle_invpcid,
9368         [EXIT_REASON_VMFUNC]                  = handle_vmfunc,
9369         [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
9370         [EXIT_REASON_ENCLS]                   = handle_encls,
9371 };
9372
9373 static const int kvm_vmx_max_exit_handlers =
9374         ARRAY_SIZE(kvm_vmx_exit_handlers);
9375
9376 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
9377                                        struct vmcs12 *vmcs12)
9378 {
9379         unsigned long exit_qualification;
9380         gpa_t bitmap, last_bitmap;
9381         unsigned int port;
9382         int size;
9383         u8 b;
9384
9385         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
9386                 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
9387
9388         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
9389
9390         port = exit_qualification >> 16;
9391         size = (exit_qualification & 7) + 1;
9392
9393         last_bitmap = (gpa_t)-1;
9394         b = -1;
9395
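        /*
         * I/O bitmap A covers ports 0x0000-0x7fff and bitmap B covers ports
         * 0x8000-0xffff. Walk the accessed range one port at a time,
         * re-reading a bitmap byte from guest memory only when its address
         * changes.
         */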
9396         while (size > 0) {
9397                 if (port < 0x8000)
9398                         bitmap = vmcs12->io_bitmap_a;
9399                 else if (port < 0x10000)
9400                         bitmap = vmcs12->io_bitmap_b;
9401                 else
9402                         return true;
9403                 bitmap += (port & 0x7fff) / 8;
9404
9405                 if (last_bitmap != bitmap)
9406                         if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
9407                                 return true;
9408                 if (b & (1 << (port & 7)))
9409                         return true;
9410
9411                 port++;
9412                 size--;
9413                 last_bitmap = bitmap;
9414         }
9415
9416         return false;
9417 }
9418
9419 /*
9420  * Return true if we should exit from L2 to L1 to handle an MSR access,
9421  * rather than handle it ourselves in L0. I.e., check whether L1 asked to
9422  * intercept the current event (a read or write of a specific MSR) via its
9423  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
9424  */
9425 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
9426         struct vmcs12 *vmcs12, u32 exit_reason)
9427 {
9428         u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
9429         gpa_t bitmap;
9430
9431         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
9432                 return true;
9433
9434         /*
9435          * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
9436          * for the four combinations of read/write and low/high MSR numbers.
9437          * First we need to figure out which of the four to use:
9438          */
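        /*
         * Byte offsets within the 4-KiB MSR-bitmap page:
         *   0x000: reads of low MSRs   (0x00000000 - 0x00001fff)
         *   0x400: reads of high MSRs  (0xc0000000 - 0xc0001fff)
         *   0x800: writes of low MSRs
         *   0xc00: writes of high MSRs
         */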
9439         bitmap = vmcs12->msr_bitmap;
9440         if (exit_reason == EXIT_REASON_MSR_WRITE)
9441                 bitmap += 2048;
9442         if (msr_index >= 0xc0000000) {
9443                 msr_index -= 0xc0000000;
9444                 bitmap += 1024;
9445         }
9446
9447         /* Then read the msr_index'th bit from this bitmap: */
9448         if (msr_index < 1024*8) {
9449                 unsigned char b;
9450                 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
9451                         return true;
9452                 return 1 & (b >> (msr_index & 7));
9453         } else
9454                 return true; /* let L1 handle the wrong parameter */
9455 }
9456
9457 /*
9458  * Return true if we should exit from L2 to L1 to handle a CR access exit,
9459  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
9460  * intercept (via guest_host_mask etc.) the current event.
9461  */
9462 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
9463         struct vmcs12 *vmcs12)
9464 {
9465         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
9466         int cr = exit_qualification & 15;
9467         int reg;
9468         unsigned long val;
9469
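        /*
         * Exit-qualification decode for CR accesses: bits 3:0 give the CR
         * number, bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR,
         * 2 = CLTS, 3 = LMSW), bits 11:8 the general-purpose register, and
         * bits 31:16 the LMSW source data.
         */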
9470         switch ((exit_qualification >> 4) & 3) {
9471         case 0: /* mov to cr */
9472                 reg = (exit_qualification >> 8) & 15;
9473                 val = kvm_register_readl(vcpu, reg);
9474                 switch (cr) {
9475                 case 0:
9476                         if (vmcs12->cr0_guest_host_mask &
9477                             (val ^ vmcs12->cr0_read_shadow))
9478                                 return true;
9479                         break;
9480                 case 3:
9481                         if ((vmcs12->cr3_target_count >= 1 &&
9482                                         vmcs12->cr3_target_value0 == val) ||
9483                                 (vmcs12->cr3_target_count >= 2 &&
9484                                         vmcs12->cr3_target_value1 == val) ||
9485                                 (vmcs12->cr3_target_count >= 3 &&
9486                                         vmcs12->cr3_target_value2 == val) ||
9487                                 (vmcs12->cr3_target_count >= 4 &&
9488                                         vmcs12->cr3_target_value3 == val))
9489                                 return false;
9490                         if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
9491                                 return true;
9492                         break;
9493                 case 4:
9494                         if (vmcs12->cr4_guest_host_mask &
9495                             (vmcs12->cr4_read_shadow ^ val))
9496                                 return true;
9497                         break;
9498                 case 8:
9499                         if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
9500                                 return true;
9501                         break;
9502                 }
9503                 break;
9504         case 2: /* clts */
9505                 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
9506                     (vmcs12->cr0_read_shadow & X86_CR0_TS))
9507                         return true;
9508                 break;
9509         case 1: /* mov from cr */
9510                 switch (cr) {
9511                 case 3:
9512                         if (vmcs12->cpu_based_vm_exec_control &
9513                             CPU_BASED_CR3_STORE_EXITING)
9514                                 return true;
9515                         break;
9516                 case 8:
9517                         if (vmcs12->cpu_based_vm_exec_control &
9518                             CPU_BASED_CR8_STORE_EXITING)
9519                                 return true;
9520                         break;
9521                 }
9522                 break;
9523         case 3: /* lmsw */
9524                 /*
9525                  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
9526                  * cr0. Other attempted changes are ignored, with no exit.
9527                  */
9528                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
9529                 if (vmcs12->cr0_guest_host_mask & 0xe &
9530                     (val ^ vmcs12->cr0_read_shadow))
9531                         return true;
9532                 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
9533                     !(vmcs12->cr0_read_shadow & 0x1) &&
9534                     (val & 0x1))
9535                         return true;
9536                 break;
9537         }
9538         return false;
9539 }
9540
9541 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
9542         struct vmcs12 *vmcs12, gpa_t bitmap)
9543 {
9544         u32 vmx_instruction_info;
9545         unsigned long field;
9546         u8 b;
9547
9548         if (!nested_cpu_has_shadow_vmcs(vmcs12))
9549                 return true;
9550
9551         /* Decode instruction info and find the field to access */
9552         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
9553         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
9554
9555         /* Out-of-range fields always cause a VM exit from L2 to L1 */
9556         if (field >> 15)
9557                 return true;
9558
9559         if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
9560                 return true;
9561
9562         return 1 & (b >> (field & 7));
9563 }
9564
9565 /*
9566  * Return true if we should exit from L2 to L1 to handle an exit, or false if
9567  * we should handle it ourselves in L0 (and then continue L2). Only call this
9568  * when in is_guest_mode (L2).
9569  */
9570 static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
9571 {
9572         u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
9573         struct vcpu_vmx *vmx = to_vmx(vcpu);
9574         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9575
9576         if (vmx->nested.nested_run_pending)
9577                 return false;
9578
9579         if (unlikely(vmx->fail)) {
9580                 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
9581                                     vmcs_read32(VM_INSTRUCTION_ERROR));
9582                 return true;
9583         }
9584
9585         /*
9586          * The host physical addresses of some pages of guest memory
9587          * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
9588          * Page). The CPU may write to these pages via their host
9589          * physical address while L2 is running, bypassing any
9590          * address-translation-based dirty tracking (e.g. EPT write
9591          * protection).
9592          *
9593          * Mark them dirty on every exit from L2 to prevent them from
9594          * getting out of sync with dirty tracking.
9595          */
9596         nested_mark_vmcs12_pages_dirty(vcpu);
9597
9598         trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
9599                                 vmcs_readl(EXIT_QUALIFICATION),
9600                                 vmx->idt_vectoring_info,
9601                                 intr_info,
9602                                 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
9603                                 KVM_ISA_VMX);
9604
9605         switch (exit_reason) {
9606         case EXIT_REASON_EXCEPTION_NMI:
9607                 if (is_nmi(intr_info))
9608                         return false;
9609                 else if (is_page_fault(intr_info))
9610                         return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
9611                 else if (is_debug(intr_info) &&
9612                          vcpu->guest_debug &
9613                          (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
9614                         return false;
9615                 else if (is_breakpoint(intr_info) &&
9616                          vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
9617                         return false;
9618                 return vmcs12->exception_bitmap &
9619                                 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
9620         case EXIT_REASON_EXTERNAL_INTERRUPT:
9621                 return false;
9622         case EXIT_REASON_TRIPLE_FAULT:
9623                 return true;
9624         case EXIT_REASON_PENDING_INTERRUPT:
9625                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
9626         case EXIT_REASON_NMI_WINDOW:
9627                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
9628         case EXIT_REASON_TASK_SWITCH:
9629                 return true;
9630         case EXIT_REASON_CPUID:
9631                 return true;
9632         case EXIT_REASON_HLT:
9633                 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
9634         case EXIT_REASON_INVD:
9635                 return true;
9636         case EXIT_REASON_INVLPG:
9637                 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
9638         case EXIT_REASON_RDPMC:
9639                 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
9640         case EXIT_REASON_RDRAND:
9641                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
9642         case EXIT_REASON_RDSEED:
9643                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
9644         case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
9645                 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
9646         case EXIT_REASON_VMREAD:
9647                 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
9648                         vmcs12->vmread_bitmap);
9649         case EXIT_REASON_VMWRITE:
9650                 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
9651                         vmcs12->vmwrite_bitmap);
9652         case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
9653         case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
9654         case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
9655         case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
9656         case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
9657                 /*
9658                  * VMX instructions trap unconditionally. This allows L1 to
9659                  * emulate them for its L2 guest, i.e., allows 3-level nesting!
9660                  */
9661                 return true;
9662         case EXIT_REASON_CR_ACCESS:
9663                 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
9664         case EXIT_REASON_DR_ACCESS:
9665                 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
9666         case EXIT_REASON_IO_INSTRUCTION:
9667                 return nested_vmx_exit_handled_io(vcpu, vmcs12);
9668         case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
9669                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
9670         case EXIT_REASON_MSR_READ:
9671         case EXIT_REASON_MSR_WRITE:
9672                 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
9673         case EXIT_REASON_INVALID_STATE:
9674                 return true;
9675         case EXIT_REASON_MWAIT_INSTRUCTION:
9676                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
9677         case EXIT_REASON_MONITOR_TRAP_FLAG:
9678                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
9679         case EXIT_REASON_MONITOR_INSTRUCTION:
9680                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
9681         case EXIT_REASON_PAUSE_INSTRUCTION:
9682                 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
9683                         nested_cpu_has2(vmcs12,
9684                                 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
9685         case EXIT_REASON_MCE_DURING_VMENTRY:
9686                 return false;
9687         case EXIT_REASON_TPR_BELOW_THRESHOLD:
9688                 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
9689         case EXIT_REASON_APIC_ACCESS:
9690         case EXIT_REASON_APIC_WRITE:
9691         case EXIT_REASON_EOI_INDUCED:
9692                 /*
9693                  * The controls for "virtualize APIC accesses," "APIC-
9694                  * register virtualization," and "virtual-interrupt
9695                  * delivery" only come from vmcs12.
9696                  */
9697                 return true;
9698         case EXIT_REASON_EPT_VIOLATION:
9699                 /*
9700                  * L0 always deals with the EPT violation. If nested EPT is
9701                  * used, and the nested mmu code discovers that the address is
9702                  * missing from the guest EPT table (EPT12), the EPT violation
9703                  * will be injected into L1 with nested_ept_inject_page_fault().
9704                  */
9705                 return false;
9706         case EXIT_REASON_EPT_MISCONFIG:
9707                 /*
9708                  * L2 never directly uses L1's EPT, but rather L0's own EPT
9709                  * table (shadow on EPT) or a merged EPT table that L0 built
9710                  * (EPT on EPT). So any problems with the structure of the
9711                  * table are L0's fault.
9712                  */
9713                 return false;
9714         case EXIT_REASON_INVPCID:
9715                 return
9716                         nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
9717                         nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
9718         case EXIT_REASON_WBINVD:
9719                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
9720         case EXIT_REASON_XSETBV:
9721                 return true;
9722         case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
9723                 /*
9724                  * This should never happen, since it is not possible to
9725                  * set XSS to a non-zero value in either L1 or L2.
9726                  * If it were possible, XSS would have to be checked against
9727                  * the XSS exit bitmap in vmcs12.
9728                  */
9729                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
9730         case EXIT_REASON_PREEMPTION_TIMER:
9731                 return false;
9732         case EXIT_REASON_PML_FULL:
9733                 /* We emulate PML support to L1. */
9734                 return false;
9735         case EXIT_REASON_VMFUNC:
9736                 /* VM functions are emulated through L2->L0 vmexits. */
9737                 return false;
9738         case EXIT_REASON_ENCLS:
9739                 /* SGX is never exposed to L1 */
9740                 return false;
9741         default:
9742                 return true;
9743         }
9744 }
9745
9746 static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
9747 {
9748         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
9749
9750         /*
9751          * At this point, the exit interruption info in exit_intr_info
9752          * is only valid for EXCEPTION_NMI exits.  For EXTERNAL_INTERRUPT
9753          * we need to query the in-kernel LAPIC.
9754          */
9755         WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
9756         if ((exit_intr_info &
9757              (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
9758             (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
9759                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9760                 vmcs12->vm_exit_intr_error_code =
9761                         vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
9762         }
9763
9764         nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
9765                           vmcs_readl(EXIT_QUALIFICATION));
9766         return 1;
9767 }
9768
9769 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
9770 {
9771         *info1 = vmcs_readl(EXIT_QUALIFICATION);
9772         *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
9773 }
9774
9775 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
9776 {
9777         if (vmx->pml_pg) {
9778                 __free_page(vmx->pml_pg);
9779                 vmx->pml_pg = NULL;
9780         }
9781 }
9782
9783 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
9784 {
9785         struct vcpu_vmx *vmx = to_vmx(vcpu);
9786         u64 *pml_buf;
9787         u16 pml_idx;
9788
9789         pml_idx = vmcs_read16(GUEST_PML_INDEX);
9790
9791         /* Do nothing if PML buffer is empty */
9792         if (pml_idx == (PML_ENTITY_NUM - 1))
9793                 return;
9794
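        /*
         * The CPU fills the PML buffer from the highest index downward and
         * decrements GUEST_PML_INDEX after each write, so the valid entries
         * live at indices pml_idx + 1 through PML_ENTITY_NUM - 1; when the
         * buffer is completely full the index has underflowed past zero.
         */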
9795         /* PML index always points to next available PML buffer entity */
9796         if (pml_idx >= PML_ENTITY_NUM)
9797                 pml_idx = 0;
9798         else
9799                 pml_idx++;
9800
9801         pml_buf = page_address(vmx->pml_pg);
9802         for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
9803                 u64 gpa;
9804
9805                 gpa = pml_buf[pml_idx];
9806                 WARN_ON(gpa & (PAGE_SIZE - 1));
9807                 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
9808         }
9809
9810         /* reset PML index */
9811         vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
9812 }
9813
9814 /*
9815  * Flush all vcpus' PML buffers and record the logged GPAs in the dirty bitmap.
9816  * Called before reporting dirty_bitmap to userspace.
9817  */
9818 static void kvm_flush_pml_buffers(struct kvm *kvm)
9819 {
9820         int i;
9821         struct kvm_vcpu *vcpu;
9822         /*
9823          * We only need to kick each vcpu out of guest mode: the PML buffer
9824          * is flushed at the beginning of every VMEXIT, so only vcpus that
9825          * are currently running in guest mode can have unflushed GPAs in
9826          * their PML buffers.
9827          */
9828         kvm_for_each_vcpu(i, vcpu, kvm)
9829                 kvm_vcpu_kick(vcpu);
9830 }
9831
9832 static void vmx_dump_sel(char *name, uint32_t sel)
9833 {
9834         pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
9835                name, vmcs_read16(sel),
9836                vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
9837                vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
9838                vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
9839 }
9840
9841 static void vmx_dump_dtsel(char *name, uint32_t limit)
9842 {
9843         pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
9844                name, vmcs_read32(limit),
9845                vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
9846 }
9847
9848 static void dump_vmcs(void)
9849 {
9850         u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
9851         u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
9852         u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
9853         u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
9854         u32 secondary_exec_control = 0;
9855         unsigned long cr4 = vmcs_readl(GUEST_CR4);
9856         u64 efer = vmcs_read64(GUEST_IA32_EFER);
9857         int i, n;
9858
9859         if (cpu_has_secondary_exec_ctrls())
9860                 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
9861
9862         pr_err("*** Guest State ***\n");
9863         pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
9864                vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
9865                vmcs_readl(CR0_GUEST_HOST_MASK));
9866         pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
9867                cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
9868         pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
9869         if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
9870             (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
9871         {
9872                 pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
9873                        vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
9874                 pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
9875                        vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
9876         }
9877         pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
9878                vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
9879         pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
9880                vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
9881         pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
9882                vmcs_readl(GUEST_SYSENTER_ESP),
9883                vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
9884         vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
9885         vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
9886         vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
9887         vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
9888         vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
9889         vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
9890         vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
9891         vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
9892         vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
9893         vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
9894         if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
9895             (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
9896                 pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
9897                        efer, vmcs_read64(GUEST_IA32_PAT));
9898         pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
9899                vmcs_read64(GUEST_IA32_DEBUGCTL),
9900                vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
9901         if (cpu_has_load_perf_global_ctrl &&
9902             vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
9903                 pr_err("PerfGlobCtl = 0x%016llx\n",
9904                        vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
9905         if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
9906                 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
9907         pr_err("Interruptibility = %08x  ActivityState = %08x\n",
9908                vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
9909                vmcs_read32(GUEST_ACTIVITY_STATE));
9910         if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
9911                 pr_err("InterruptStatus = %04x\n",
9912                        vmcs_read16(GUEST_INTR_STATUS));
9913
9914         pr_err("*** Host State ***\n");
9915         pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
9916                vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
9917         pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
9918                vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
9919                vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
9920                vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
9921                vmcs_read16(HOST_TR_SELECTOR));
9922         pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
9923                vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
9924                vmcs_readl(HOST_TR_BASE));
9925         pr_err("GDTBase=%016lx IDTBase=%016lx\n",
9926                vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
9927         pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
9928                vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
9929                vmcs_readl(HOST_CR4));
9930         pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
9931                vmcs_readl(HOST_IA32_SYSENTER_ESP),
9932                vmcs_read32(HOST_IA32_SYSENTER_CS),
9933                vmcs_readl(HOST_IA32_SYSENTER_EIP));
9934         if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
9935                 pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
9936                        vmcs_read64(HOST_IA32_EFER),
9937                        vmcs_read64(HOST_IA32_PAT));
9938         if (cpu_has_load_perf_global_ctrl &&
9939             vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
9940                 pr_err("PerfGlobCtl = 0x%016llx\n",
9941                        vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
9942
9943         pr_err("*** Control State ***\n");
9944         pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
9945                pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
9946         pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
9947         pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
9948                vmcs_read32(EXCEPTION_BITMAP),
9949                vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
9950                vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
9951         pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
9952                vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
9953                vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
9954                vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
9955         pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
9956                vmcs_read32(VM_EXIT_INTR_INFO),
9957                vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
9958                vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
9959         pr_err("        reason=%08x qualification=%016lx\n",
9960                vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
9961         pr_err("IDTVectoring: info=%08x errcode=%08x\n",
9962                vmcs_read32(IDT_VECTORING_INFO_FIELD),
9963                vmcs_read32(IDT_VECTORING_ERROR_CODE));
9964         pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
9965         if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
9966                 pr_err("TSC Multiplier = 0x%016llx\n",
9967                        vmcs_read64(TSC_MULTIPLIER));
9968         if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
9969                 pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
9970         if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
9971                 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
9972         if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
9973                 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
9974         n = vmcs_read32(CR3_TARGET_COUNT);
9975         for (i = 0; i + 1 < n; i += 2)
9976                 pr_err("CR3 target%u=%016lx target%u=%016lx\n",
9977                        i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
9978                        i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
9979         if (i < n)
9980                 pr_err("CR3 target%u=%016lx\n",
9981                        i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
9982         if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
9983                 pr_err("PLE Gap=%08x Window=%08x\n",
9984                        vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
9985         if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
9986                 pr_err("Virtual processor ID = 0x%04x\n",
9987                        vmcs_read16(VIRTUAL_PROCESSOR_ID));
9988 }
9989
9990 /*
9991  * The guest has exited.  See if we can fix it or if we need userspace
9992  * assistance.
9993  */
9994 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
9995 {
9996         struct vcpu_vmx *vmx = to_vmx(vcpu);
9997         u32 exit_reason = vmx->exit_reason;
9998         u32 vectoring_info = vmx->idt_vectoring_info;
9999
10000         trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
10001
10002         /*
10003          * Flush the logged GPAs from the PML buffer so that dirty_bitmap is
10004          * up to date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before
10005          * querying dirty_bitmap we only need to kick all vcpus out of guest
10006          * mode, because once a vcpu is in root mode its PML buffer must have
10007          * been flushed already.
10008          */
10009         if (enable_pml)
10010                 vmx_flush_pml_buffer(vcpu);
10011
10012         /* If guest state is invalid, start emulating */
10013         if (vmx->emulation_required)
10014                 return handle_invalid_guest_state(vcpu);
10015
10016         if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
10017                 return nested_vmx_reflect_vmexit(vcpu, exit_reason);
10018
10019         if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
10020                 dump_vmcs();
10021                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
10022                 vcpu->run->fail_entry.hardware_entry_failure_reason
10023                         = exit_reason;
10024                 return 0;
10025         }
10026
10027         if (unlikely(vmx->fail)) {
10028                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
10029                 vcpu->run->fail_entry.hardware_entry_failure_reason
10030                         = vmcs_read32(VM_INSTRUCTION_ERROR);
10031                 return 0;
10032         }
10033
10034         /*
10035          * Note:
10036          * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
10037          * event delivery, since that indicates the guest is accessing MMIO.
10038          * The VM exit would be triggered again after returning to the guest,
10039          * causing an infinite loop.
10040          */
10041         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
10042                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
10043                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
10044                         exit_reason != EXIT_REASON_PML_FULL &&
10045                         exit_reason != EXIT_REASON_TASK_SWITCH)) {
10046                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
10047                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
10048                 vcpu->run->internal.ndata = 3;
10049                 vcpu->run->internal.data[0] = vectoring_info;
10050                 vcpu->run->internal.data[1] = exit_reason;
10051                 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
10052                 if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
10053                         vcpu->run->internal.ndata++;
10054                         vcpu->run->internal.data[3] =
10055                                 vmcs_read64(GUEST_PHYSICAL_ADDRESS);
10056                 }
10057                 return 0;
10058         }
10059
10060         if (unlikely(!enable_vnmi &&
10061                      vmx->loaded_vmcs->soft_vnmi_blocked)) {
10062                 if (vmx_interrupt_allowed(vcpu)) {
10063                         vmx->loaded_vmcs->soft_vnmi_blocked = 0;
10064                 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
10065                            vcpu->arch.nmi_pending) {
10066                         /*
10067                          * This CPU doesn't help us find the end of an
10068                          * NMI-blocked window if the guest runs with IRQs
10069                          * disabled. So we pull the trigger after 1 s of
10070                          * futile waiting, but inform the user about it.
10071                          */
10072                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
10073                                "state on VCPU %d after 1 s timeout\n",
10074                                __func__, vcpu->vcpu_id);
10075                         vmx->loaded_vmcs->soft_vnmi_blocked = 0;
10076                 }
10077         }
10078
10079         if (exit_reason < kvm_vmx_max_exit_handlers
10080             && kvm_vmx_exit_handlers[exit_reason])
10081                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
10082         else {
10083                 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
10084                                 exit_reason);
10085                 kvm_queue_exception(vcpu, UD_VECTOR);
10086                 return 1;
10087         }
10088 }
10089
10090 /*
10091  * Software-based L1D cache flush, used when the microcode providing the
10092  * cache control MSR is not loaded.
10093  *
10094  * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
10095  * flush it we need to read 64 KiB because the replacement algorithm
10096  * is not exactly LRU. This could be sized at runtime via topology
10097  * information, but as all relevant affected CPUs have a 32 KiB L1D cache
10098  * there is no point in doing so.
10099  */
10100 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
10101 {
10102         int size = PAGE_SIZE << L1D_CACHE_ORDER;
10103
10104         /*
10105          * This code is only executed when the flush mode is 'cond' or
10106          * 'always'.
10107          */
10108         if (static_branch_likely(&vmx_l1d_flush_cond)) {
10109                 bool flush_l1d;
10110
10111                 /*
10112                  * Clear the per-vcpu flush bit, it gets set again
10113                  * either from vcpu_run() or from one of the unsafe
10114                  * VMEXIT handlers.
10115                  */
10116                 flush_l1d = vcpu->arch.l1tf_flush_l1d;
10117                 vcpu->arch.l1tf_flush_l1d = false;
10118
10119                 /*
10120                  * Clear the per-cpu flush bit, it gets set again from
10121                  * the interrupt handlers.
10122                  */
10123                 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
10124                 kvm_clear_cpu_l1tf_flush_l1d();
10125
10126                 if (!flush_l1d)
10127                         return;
10128         }
10129
10130         vcpu->stat.l1d_flush++;
10131
10132         if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
10133                 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
10134                 return;
10135         }
10136
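        /*
         * No hardware assist available: displace the L1D by touching the
         * dedicated vmx_l1d_flush_pages buffer.  First touch one byte per
         * 4 KiB page so every page of the buffer is present in the TLB
         * (CPUID serializes before the second pass), then read one byte
         * per 64-byte cache line to push the previous L1D contents out.
         */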
10137         asm volatile(
10138                 /* First ensure the pages are in the TLB */
10139                 "xorl   %%eax, %%eax\n"
10140                 ".Lpopulate_tlb:\n\t"
10141                 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
10142                 "addl   $4096, %%eax\n\t"
10143                 "cmpl   %%eax, %[size]\n\t"
10144                 "jne    .Lpopulate_tlb\n\t"
10145                 "xorl   %%eax, %%eax\n\t"
10146                 "cpuid\n\t"
10147                 /* Now fill the cache */
10148                 "xorl   %%eax, %%eax\n"
10149                 ".Lfill_cache:\n"
10150                 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
10151                 "addl   $64, %%eax\n\t"
10152                 "cmpl   %%eax, %[size]\n\t"
10153                 "jne    .Lfill_cache\n\t"
10154                 "lfence\n"
10155                 :: [flush_pages] "r" (vmx_l1d_flush_pages),
10156                     [size] "r" (size)
10157                 : "eax", "ebx", "ecx", "edx");
10158 }
10159
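/*
 * Program the TPR threshold so that the guest traps back to us (via a
 * TPR-below-threshold exit) once its TPR drops below the priority class of
 * the highest pending interrupt, at which point that interrupt can be
 * injected.  A threshold of 0 effectively disables the intercept.
 */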
10160 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
10161 {
10162         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
10163
10164         if (is_guest_mode(vcpu) &&
10165                 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
10166                 return;
10167
10168         if (irr == -1 || tpr < irr) {
10169                 vmcs_write32(TPR_THRESHOLD, 0);
10170                 return;
10171         }
10172
10173         vmcs_write32(TPR_THRESHOLD, irr);
10174 }
10175
10176 static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
10177 {
10178         u32 sec_exec_control;
10179
10180         if (!lapic_in_kernel(vcpu))
10181                 return;
10182
10183         if (!flexpriority_enabled &&
10184             !cpu_has_vmx_virtualize_x2apic_mode())
10185                 return;
10186
10187         /* Postpone execution until vmcs01 is the current VMCS. */
10188         if (is_guest_mode(vcpu)) {
10189                 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
10190                 return;
10191         }
10192
10193         sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
10194         sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
10195                               SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
10196
10197         switch (kvm_get_apic_mode(vcpu)) {
10198         case LAPIC_MODE_INVALID:
10199                 WARN_ONCE(true, "Invalid local APIC state");
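                /* fall through */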
10200         case LAPIC_MODE_DISABLED:
10201                 break;
10202         case LAPIC_MODE_XAPIC:
10203                 if (flexpriority_enabled) {
10204                         sec_exec_control |=
10205                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
10206                         vmx_flush_tlb(vcpu, true);
10207                 }
10208                 break;
10209         case LAPIC_MODE_X2APIC:
10210                 if (cpu_has_vmx_virtualize_x2apic_mode())
10211                         sec_exec_control |=
10212                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
10213                 break;
10214         }
10215         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
10216
10217         vmx_update_msr_bitmap(vcpu);
10218 }
10219
10220 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
10221 {
10222         if (!is_guest_mode(vcpu)) {
10223                 vmcs_write64(APIC_ACCESS_ADDR, hpa);
10224                 vmx_flush_tlb(vcpu, true);
10225         }
10226 }
10227
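/*
 * GUEST_INTR_STATUS is a 16-bit field: the low byte holds RVI (requesting
 * virtual interrupt) and the high byte holds SVI (servicing virtual
 * interrupt).  This helper updates only SVI.
 */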
10228 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
10229 {
10230         u16 status;
10231         u8 old;
10232
10233         if (max_isr == -1)
10234                 max_isr = 0;
10235
10236         status = vmcs_read16(GUEST_INTR_STATUS);
10237         old = status >> 8;
10238         if (max_isr != old) {
10239                 status &= 0xff;
10240                 status |= max_isr << 8;
10241                 vmcs_write16(GUEST_INTR_STATUS, status);
10242         }
10243 }
10244
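/* Update only RVI, i.e. the low byte of GUEST_INTR_STATUS. */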
10245 static void vmx_set_rvi(int vector)
10246 {
10247         u16 status;
10248         u8 old;
10249
10250         if (vector == -1)
10251                 vector = 0;
10252
10253         status = vmcs_read16(GUEST_INTR_STATUS);
10254         old = (u8)status & 0xff;
10255         if ((u8)vector != old) {
10256                 status &= ~0xff;
10257                 status |= (u8)vector;
10258                 vmcs_write16(GUEST_INTR_STATUS, status);
10259         }
10260 }
10261
10262 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
10263 {
10264         /*
10265          * When running L2, updating RVI is only relevant when
10266          * vmcs12 has virtual-interrupt-delivery enabled.
10267          * However, it can be enabled only when L1 also
10268          * intercepts external interrupts, and in that case
10269          * we should not update the vmcs02 RVI but instead intercept
10270          * the interrupt. Therefore, do nothing when running L2.
10271          */
10272         if (!is_guest_mode(vcpu))
10273                 vmx_set_rvi(max_irr);
10274 }
10275
10276 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
10277 {
10278         struct vcpu_vmx *vmx = to_vmx(vcpu);
10279         int max_irr;
10280         bool max_irr_updated;
10281
10282         WARN_ON(!vcpu->arch.apicv_active);
10283         if (pi_test_on(&vmx->pi_desc)) {
10284                 pi_clear_on(&vmx->pi_desc);
10285                 /*
10286                  * IOMMU can write to PIR.ON, so the barrier matters even on UP.
10287                  * But on x86 this is just a compiler barrier anyway.
10288                  */
10289                 smp_mb__after_atomic();
10290                 max_irr_updated =
10291                         kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
10292
10293                 /*
10294                  * If we are running L2 and L1 has a new pending interrupt
10295                  * which can be injected, we should re-evaluate
10296                  * what should be done with this new L1 interrupt.
10297                  * If L1 intercepts external interrupts, we should
10298                  * exit from L2 to L1. Otherwise, the interrupt should be
10299                  * delivered directly to L2.
10300                  */
10301                 if (is_guest_mode(vcpu) && max_irr_updated) {
10302                         if (nested_exit_on_intr(vcpu))
10303                                 kvm_vcpu_exiting_guest_mode(vcpu);
10304                         else
10305                                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10306                 }
10307         } else {
10308                 max_irr = kvm_lapic_find_highest_irr(vcpu);
10309         }
10310         vmx_hwapic_irr_update(vcpu, max_irr);
10311         return max_irr;
10312 }
10313
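/*
 * Only the priority class (the upper four bits) of RVI and PPR matters for
 * delivery: a pending virtual interrupt is deliverable only if its class is
 * strictly higher than the processor-priority class.
 */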
10314 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
10315 {
10316         u8 rvi = vmx_get_rvi();
10317         u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
10318
10319         return ((rvi & 0xf0) > (vppr & 0xf0));
10320 }
10321
10322 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
10323 {
10324         if (!kvm_vcpu_apicv_active(vcpu))
10325                 return;
10326
10327         vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
10328         vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
10329         vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
10330         vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
10331 }
10332
10333 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
10334 {
10335         struct vcpu_vmx *vmx = to_vmx(vcpu);
10336
10337         pi_clear_on(&vmx->pi_desc);
10338         memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
10339 }
10340
10341 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
10342 {
10343         u32 exit_intr_info = 0;
10344         u16 basic_exit_reason = (u16)vmx->exit_reason;
10345
10346         if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
10347               || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
10348                 return;
10349
10350         if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
10351                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10352         vmx->exit_intr_info = exit_intr_info;
10353
10354         /* if exit due to PF check for async PF */
10355         if (is_page_fault(exit_intr_info))
10356                 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
10357
10358         /* Handle machine checks before interrupts are enabled */
10359         if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
10360             is_machine_check(exit_intr_info))
10361                 kvm_machine_check();
10362
10363         /* We need to handle NMIs before interrupts are enabled */
10364         if (is_nmi(exit_intr_info)) {
10365                 kvm_before_interrupt(&vmx->vcpu);
10366                 asm("int $2");
10367                 kvm_after_interrupt(&vmx->vcpu);
10368         }
10369 }
10370
10371 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
10372 {
10373         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10374
10375         if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
10376                         == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
10377                 unsigned int vector;
10378                 unsigned long entry;
10379                 gate_desc *desc;
10380                 struct vcpu_vmx *vmx = to_vmx(vcpu);
10381 #ifdef CONFIG_X86_64
10382                 unsigned long tmp;
10383 #endif
10384
10385                 vector =  exit_intr_info & INTR_INFO_VECTOR_MASK;
10386                 desc = (gate_desc *)vmx->host_idt_base + vector;
10387                 entry = gate_offset(desc);
10388                 asm volatile(
10389 #ifdef CONFIG_X86_64
10390                         "mov %%" _ASM_SP ", %[sp]\n\t"
10391                         "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
10392                         "push $%c[ss]\n\t"
10393                         "push %[sp]\n\t"
10394 #endif
10395                         "pushf\n\t"
10396                         __ASM_SIZE(push) " $%c[cs]\n\t"
10397                         CALL_NOSPEC
10398                         :
10399 #ifdef CONFIG_X86_64
10400                         [sp]"=&r"(tmp),
10401 #endif
10402                         ASM_CALL_CONSTRAINT
10403                         :
10404                         THUNK_TARGET(entry),
10405                         [ss]"i"(__KERNEL_DS),
10406                         [cs]"i"(__KERNEL_CS)
10407                         );
10408         }
10409 }
10410 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
10411
10412 static bool vmx_has_emulated_msr(int index)
10413 {
10414         switch (index) {
10415         case MSR_IA32_SMBASE:
10416                 /*
10417                  * We cannot do SMM unless we can run the guest in big
10418                  * real mode.
10419                  */
10420                 return enable_unrestricted_guest || emulate_invalid_guest_state;
10421         case MSR_AMD64_VIRT_SPEC_CTRL:
10422                 /* This is AMD only.  */
10423                 return false;
10424         default:
10425                 return true;
10426         }
10427 }
10428
10429 static bool vmx_mpx_supported(void)
10430 {
10431         return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
10432                 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
10433 }
10434
10435 static bool vmx_xsaves_supported(void)
10436 {
10437         return vmcs_config.cpu_based_2nd_exec_ctrl &
10438                 SECONDARY_EXEC_XSAVES;
10439 }
10440
10441 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
10442 {
10443         u32 exit_intr_info;
10444         bool unblock_nmi;
10445         u8 vector;
10446         bool idtv_info_valid;
10447
10448         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
10449
10450         if (enable_vnmi) {
10451                 if (vmx->loaded_vmcs->nmi_known_unmasked)
10452                         return;
10453                 /*
10454                  * Can't use vmx->exit_intr_info since we're not sure what
10455                  * the exit reason is.
10456                  */
10457                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10458                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
10459                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
10460                 /*
10461                  * SDM 3: 27.7.1.2 (September 2008)
10462                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
10463                  * a guest IRET fault.
10464                  * SDM 3: 23.2.2 (September 2008)
10465                  * Bit 12 is undefined in any of the following cases:
10466                  *  If the VM exit sets the valid bit in the IDT-vectoring
10467                  *   information field.
10468                  *  If the VM exit is due to a double fault.
10469                  */
10470                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
10471                     vector != DF_VECTOR && !idtv_info_valid)
10472                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
10473                                       GUEST_INTR_STATE_NMI);
10474                 else
10475                         vmx->loaded_vmcs->nmi_known_unmasked =
10476                                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
10477                                   & GUEST_INTR_STATE_NMI);
10478         } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
10479                 vmx->loaded_vmcs->vnmi_blocked_time +=
10480                         ktime_to_ns(ktime_sub(ktime_get(),
10481                                               vmx->loaded_vmcs->entry_time));
10482 }
10483
10484 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
10485                                       u32 idt_vectoring_info,
10486                                       int instr_len_field,
10487                                       int error_code_field)
10488 {
10489         u8 vector;
10490         int type;
10491         bool idtv_info_valid;
10492
10493         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
10494
10495         vcpu->arch.nmi_injected = false;
10496         kvm_clear_exception_queue(vcpu);
10497         kvm_clear_interrupt_queue(vcpu);
10498
10499         if (!idtv_info_valid)
10500                 return;
10501
10502         kvm_make_request(KVM_REQ_EVENT, vcpu);
10503
10504         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
10505         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
10506
10507         switch (type) {
10508         case INTR_TYPE_NMI_INTR:
10509                 vcpu->arch.nmi_injected = true;
10510                 /*
10511                  * SDM 3: 27.7.1.2 (September 2008)
10512                  * Clear bit "block by NMI" before VM entry if an NMI
10513                  * delivery faulted.
10514                  */
10515                 vmx_set_nmi_mask(vcpu, false);
10516                 break;
10517         case INTR_TYPE_SOFT_EXCEPTION:
10518                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
10519                 /* fall through */
10520         case INTR_TYPE_HARD_EXCEPTION:
10521                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
10522                         u32 err = vmcs_read32(error_code_field);
10523                         kvm_requeue_exception_e(vcpu, vector, err);
10524                 } else
10525                         kvm_requeue_exception(vcpu, vector);
10526                 break;
10527         case INTR_TYPE_SOFT_INTR:
10528                 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
10529                 /* fall through */
10530         case INTR_TYPE_EXT_INTR:
10531                 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
10532                 break;
10533         default:
10534                 break;
10535         }
10536 }
10537
10538 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
10539 {
10540         __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
10541                                   VM_EXIT_INSTRUCTION_LEN,
10542                                   IDT_VECTORING_ERROR_CODE);
10543 }
10544
10545 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
10546 {
10547         __vmx_complete_interrupts(vcpu,
10548                                   vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
10549                                   VM_ENTRY_INSTRUCTION_LEN,
10550                                   VM_ENTRY_EXCEPTION_ERROR_CODE);
10551
10552         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
10553 }
10554
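/*
 * perf reports which MSRs need different host and guest values around VM
 * entry/exit.  Only MSRs whose values actually differ are kept on the VMCS
 * atomic switch lists; entries with identical values are removed.
 */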
10555 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
10556 {
10557         int i, nr_msrs;
10558         struct perf_guest_switch_msr *msrs;
10559
10560         msrs = perf_guest_get_msrs(&nr_msrs);
10561
10562         if (!msrs)
10563                 return;
10564
10565         for (i = 0; i < nr_msrs; i++)
10566                 if (msrs[i].host == msrs[i].guest)
10567                         clear_atomic_switch_msr(vmx, msrs[i].msr);
10568                 else
10569                         add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
10570                                         msrs[i].host, false);
10571 }
10572
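/*
 * The VMX preemption timer counts down at the TSC rate shifted right by
 * IA32_VMX_MISC[4:0] (cached here in cpu_preemption_timer_multi) and forces
 * a VM exit when it reaches zero.  Writing 0 therefore requests an exit
 * immediately after VM entry.
 */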
10573 static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
10574 {
10575         vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
10576         if (!vmx->loaded_vmcs->hv_timer_armed)
10577                 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
10578                               PIN_BASED_VMX_PREEMPTION_TIMER);
10579         vmx->loaded_vmcs->hv_timer_armed = true;
10580 }
10581
10582 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
10583 {
10584         struct vcpu_vmx *vmx = to_vmx(vcpu);
10585         u64 tscl;
10586         u32 delta_tsc;
10587
10588         if (vmx->req_immediate_exit) {
10589                 vmx_arm_hv_timer(vmx, 0);
10590                 return;
10591         }
10592
10593         if (vmx->hv_deadline_tsc != -1) {
10594                 tscl = rdtsc();
10595                 if (vmx->hv_deadline_tsc > tscl)
10596                         /* set_hv_timer ensures the delta fits in 32-bits */
10597                         delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
10598                                 cpu_preemption_timer_multi);
10599                 else
10600                         delta_tsc = 0;
10601
10602                 vmx_arm_hv_timer(vmx, delta_tsc);
10603                 return;
10604         }
10605
10606         if (vmx->loaded_vmcs->hv_timer_armed)
10607                 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
10608                                 PIN_BASED_VMX_PREEMPTION_TIMER);
10609         vmx->loaded_vmcs->hv_timer_armed = false;
10610 }
10611
10612 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
10613 {
10614         struct vcpu_vmx *vmx = to_vmx(vcpu);
10615         unsigned long cr3, cr4, evmcs_rsp;
10616
10617         /* Record the guest's net vcpu time for enforced NMI injections. */
10618         if (unlikely(!enable_vnmi &&
10619                      vmx->loaded_vmcs->soft_vnmi_blocked))
10620                 vmx->loaded_vmcs->entry_time = ktime_get();
10621
10622         /* Don't enter VMX if guest state is invalid; let the exit handler
10623            start emulation until we arrive back at a valid state. */
10624         if (vmx->emulation_required)
10625                 return;
10626
10627         if (vmx->ple_window_dirty) {
10628                 vmx->ple_window_dirty = false;
10629                 vmcs_write32(PLE_WINDOW, vmx->ple_window);
10630         }
10631
10632         if (vmx->nested.sync_shadow_vmcs) {
10633                 copy_vmcs12_to_shadow(vmx);
10634                 vmx->nested.sync_shadow_vmcs = false;
10635         }
10636
10637         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
10638                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
10639         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
10640                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
10641
10642         cr3 = __get_current_cr3_fast();
10643         if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
10644                 vmcs_writel(HOST_CR3, cr3);
10645                 vmx->loaded_vmcs->host_state.cr3 = cr3;
10646         }
10647
10648         cr4 = cr4_read_shadow();
10649         if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
10650                 vmcs_writel(HOST_CR4, cr4);
10651                 vmx->loaded_vmcs->host_state.cr4 = cr4;
10652         }
10653
10654         /* When single-stepping over STI and MOV SS, we must clear the
10655          * corresponding interruptibility bits in the guest state. Otherwise
10656          * vmentry fails as it then expects bit 14 (BS) in pending debug
10657          * exceptions being set, but that's not correct for the guest debugging
10658          * case. */
10659         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
10660                 vmx_set_interrupt_shadow(vcpu, 0);
10661
10662         if (static_cpu_has(X86_FEATURE_PKU) &&
10663             kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
10664             vcpu->arch.pkru != vmx->host_pkru)
10665                 __write_pkru(vcpu->arch.pkru);
10666
10667         atomic_switch_perf_msrs(vmx);
10668
10669         vmx_update_hv_timer(vcpu);
10670
10671         /*
10672          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
10673          * it's non-zero. Since vmentry is serialising on affected CPUs, there
10674          * is no need to worry about the conditional branch over the wrmsr
10675          * being speculatively taken.
10676          */
10677         x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
10678
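        /* __launched selects between VMLAUNCH and VMRESUME in the asm below. */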
10679         vmx->__launched = vmx->loaded_vmcs->launched;
10680
10681         evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
10682                 (unsigned long)&current_evmcs->host_rsp : 0;
10683
10684         if (static_branch_unlikely(&vmx_l1d_should_flush))
10685                 vmx_l1d_flush(vcpu);
10686
10687         asm(
10688                 /* Store host registers */
10689                 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
10690                 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
10691                 "push %%" _ASM_CX " \n\t"
10692                 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
10693                 "je 1f \n\t"
10694                 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
10695                 /* Avoid VMWRITE when Enlightened VMCS is in use */
10696                 "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
10697                 "jz 2f \n\t"
10698                 "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
10699                 "jmp 1f \n\t"
10700                 "2: \n\t"
10701                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
10702                 "1: \n\t"
10703                 /* Reload cr2 if changed */
10704                 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
10705                 "mov %%cr2, %%" _ASM_DX " \n\t"
10706                 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
10707                 "je 3f \n\t"
10708                 "mov %%" _ASM_AX", %%cr2 \n\t"
10709                 "3: \n\t"
10710                 /* Check if vmlaunch or vmresume is needed */
10711                 "cmpl $0, %c[launched](%0) \n\t"
10712                 /* Load guest registers.  Don't clobber flags. */
10713                 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
10714                 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
10715                 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
10716                 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
10717                 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
10718                 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
10719 #ifdef CONFIG_X86_64
10720                 "mov %c[r8](%0),  %%r8  \n\t"
10721                 "mov %c[r9](%0),  %%r9  \n\t"
10722                 "mov %c[r10](%0), %%r10 \n\t"
10723                 "mov %c[r11](%0), %%r11 \n\t"
10724                 "mov %c[r12](%0), %%r12 \n\t"
10725                 "mov %c[r13](%0), %%r13 \n\t"
10726                 "mov %c[r14](%0), %%r14 \n\t"
10727                 "mov %c[r15](%0), %%r15 \n\t"
10728 #endif
10729                 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
10730
10731                 /* Enter guest mode */
10732                 "jne 1f \n\t"
10733                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
10734                 "jmp 2f \n\t"
10735                 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
10736                 "2: "
10737                 /* Save guest registers, load host registers, keep flags */
10738                 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
10739                 "pop %0 \n\t"
10740                 "setbe %c[fail](%0)\n\t"
10741                 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
10742                 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
10743                 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
10744                 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
10745                 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
10746                 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
10747                 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
10748 #ifdef CONFIG_X86_64
10749                 "mov %%r8,  %c[r8](%0) \n\t"
10750                 "mov %%r9,  %c[r9](%0) \n\t"
10751                 "mov %%r10, %c[r10](%0) \n\t"
10752                 "mov %%r11, %c[r11](%0) \n\t"
10753                 "mov %%r12, %c[r12](%0) \n\t"
10754                 "mov %%r13, %c[r13](%0) \n\t"
10755                 "mov %%r14, %c[r14](%0) \n\t"
10756                 "mov %%r15, %c[r15](%0) \n\t"
10757                 "xor %%r8d,  %%r8d \n\t"
10758                 "xor %%r9d,  %%r9d \n\t"
10759                 "xor %%r10d, %%r10d \n\t"
10760                 "xor %%r11d, %%r11d \n\t"
10761                 "xor %%r12d, %%r12d \n\t"
10762                 "xor %%r13d, %%r13d \n\t"
10763                 "xor %%r14d, %%r14d \n\t"
10764                 "xor %%r15d, %%r15d \n\t"
10765 #endif
10766                 "mov %%cr2, %%" _ASM_AX "   \n\t"
10767                 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
10768
10769                 "xor %%eax, %%eax \n\t"
10770                 "xor %%ebx, %%ebx \n\t"
10771                 "xor %%esi, %%esi \n\t"
10772                 "xor %%edi, %%edi \n\t"
10773                 "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
10774                 ".pushsection .rodata \n\t"
10775                 ".global vmx_return \n\t"
10776                 "vmx_return: " _ASM_PTR " 2b \n\t"
10777                 ".popsection"
10778               : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
10779                 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
10780                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
10781                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
10782                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
10783                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
10784                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
10785                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
10786                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
10787                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
10788                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
10789 #ifdef CONFIG_X86_64
10790                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
10791                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
10792                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
10793                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
10794                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
10795                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
10796                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
10797                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
10798 #endif
10799                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
10800                 [wordsize]"i"(sizeof(ulong))
10801               : "cc", "memory"
10802 #ifdef CONFIG_X86_64
10803                 , "rax", "rbx", "rdi"
10804                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
10805 #else
10806                 , "eax", "ebx", "edi"
10807 #endif
10808               );
10809
10810         /*
10811          * We do not use IBRS in the kernel. If this vCPU has used the
10812          * SPEC_CTRL MSR it may have left it on; save the value and
10813          * turn it off. This is much more efficient than blindly adding
10814          * it to the atomic save/restore list, especially as the former
10815          * (saving guest MSRs on vmexit) doesn't even exist in KVM.
10816          *
10817          * For non-nested case:
10818          * If the L01 MSR bitmap does not intercept the MSR, then we need to
10819          * save it.
10820          *
10821          * For nested case:
10822          * If the L02 MSR bitmap does not intercept the MSR, then we need to
10823          * save it.
10824          */
10825         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
10826                 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
10827
10828         x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
10829
10830         /* Eliminate branch target predictions from guest mode */
10831         vmexit_fill_RSB();
10832
10833         /* All fields are clean at this point */
10834         if (static_branch_unlikely(&enable_evmcs))
10835                 current_evmcs->hv_clean_fields |=
10836                         HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
10837
10838         /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
10839         if (vmx->host_debugctlmsr)
10840                 update_debugctlmsr(vmx->host_debugctlmsr);
10841
10842 #ifndef CONFIG_X86_64
10843         /*
10844          * The sysexit path does not restore ds/es, so we must set them to
10845          * a reasonable value ourselves.
10846          *
10847          * We can't defer this to vmx_prepare_switch_to_host() since that
10848          * function may be executed in interrupt context, which saves and
10849          * restores segments around it, nullifying its effect.
10850          */
10851         loadsegment(ds, __USER_DS);
10852         loadsegment(es, __USER_DS);
10853 #endif
10854
10855         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
10856                                   | (1 << VCPU_EXREG_RFLAGS)
10857                                   | (1 << VCPU_EXREG_PDPTR)
10858                                   | (1 << VCPU_EXREG_SEGMENTS)
10859                                   | (1 << VCPU_EXREG_CR3));
10860         vcpu->arch.regs_dirty = 0;
10861
10862         /*
10863          * Eager FPU is enabled if PKEY is supported and CR4 is switched
10864          * back on the host, so it is safe to read the guest PKRU from the
10865          * current XSAVE.
10866          */
10867         if (static_cpu_has(X86_FEATURE_PKU) &&
10868             kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
10869                 vcpu->arch.pkru = __read_pkru();
10870                 if (vcpu->arch.pkru != vmx->host_pkru)
10871                         __write_pkru(vmx->host_pkru);
10872         }
10873
10874         vmx->nested.nested_run_pending = 0;
10875         vmx->idt_vectoring_info = 0;
10876
10877         vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
10878         if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
10879                 return;
10880
10881         vmx->loaded_vmcs->launched = 1;
10882         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
10883
10884         vmx_complete_atomic_exit(vmx);
10885         vmx_recover_nmi_blocking(vmx);
10886         vmx_complete_interrupts(vmx);
10887 }
10888 STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
10889
10890 static struct kvm *vmx_vm_alloc(void)
10891 {
10892         struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
10893         return &kvm_vmx->kvm;
10894 }
10895
10896 static void vmx_vm_free(struct kvm *kvm)
10897 {
10898         vfree(to_kvm_vmx(kvm));
10899 }
10900
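/*
 * Switch which loaded_vmcs (vmcs01 or a vmcs02) backs this vcpu.  Going
 * through a full vmx_vcpu_put()/vmx_vcpu_load() cycle makes the new VMCS
 * current on this CPU; the entry/exit control shadows and the segment
 * cache are then reset for it.
 */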
10901 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
10902 {
10903         struct vcpu_vmx *vmx = to_vmx(vcpu);
10904         int cpu;
10905
10906         if (vmx->loaded_vmcs == vmcs)
10907                 return;
10908
10909         cpu = get_cpu();
10910         vmx_vcpu_put(vcpu);
10911         vmx->loaded_vmcs = vmcs;
10912         vmx_vcpu_load(vcpu, cpu);
10913         put_cpu();
10914
10915         vm_entry_controls_reset_shadow(vmx);
10916         vm_exit_controls_reset_shadow(vmx);
10917         vmx_segment_cache_clear(vmx);
10918 }
10919
10920 /*
10921  * Ensure that the current vmcs of the logical processor is the
10922  * vmcs01 of the vcpu before calling free_nested().
10923  */
10924 static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
10925 {
10926        struct vcpu_vmx *vmx = to_vmx(vcpu);
10927
10928        vcpu_load(vcpu);
10929        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
10930        free_nested(vmx);
10931        vcpu_put(vcpu);
10932 }
10933
10934 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
10935 {
10936         struct vcpu_vmx *vmx = to_vmx(vcpu);
10937
10938         if (enable_pml)
10939                 vmx_destroy_pml_buffer(vmx);
10940         free_vpid(vmx->vpid);
10941         leave_guest_mode(vcpu);
10942         vmx_free_vcpu_nested(vcpu);
10943         free_loaded_vmcs(vmx->loaded_vmcs);
10944         kfree(vmx->guest_msrs);
10945         kvm_vcpu_uninit(vcpu);
10946         kmem_cache_free(kvm_vcpu_cache, vmx);
10947 }
10948
10949 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
10950 {
10951         int err;
10952         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
10953         unsigned long *msr_bitmap;
10954         int cpu;
10955
10956         if (!vmx)
10957                 return ERR_PTR(-ENOMEM);
10958
10959         vmx->vpid = allocate_vpid();
10960
10961         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
10962         if (err)
10963                 goto free_vcpu;
10964
10965         err = -ENOMEM;
10966
10967         /*
10968          * If PML is turned on, a failure to enable PML just results in failure
10969          * to create the vcpu, therefore we can simplify the PML logic (by
10970          * avoiding having to deal with cases such as enabling PML partially on
10971          * vcpus for the guest, etc.).
10972          */
10973         if (enable_pml) {
10974                 vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
10975                 if (!vmx->pml_pg)
10976                         goto uninit_vcpu;
10977         }
10978
10979         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
10980         BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
10981                      > PAGE_SIZE);
10982
10983         if (!vmx->guest_msrs)
10984                 goto free_pml;
10985
10986         err = alloc_loaded_vmcs(&vmx->vmcs01);
10987         if (err < 0)
10988                 goto free_msrs;
10989
10990         msr_bitmap = vmx->vmcs01.msr_bitmap;
10991         vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
10992         vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
10993         vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
10994         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
10995         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
10996         vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
10997         vmx->msr_bitmap_mode = 0;
10998
10999         vmx->loaded_vmcs = &vmx->vmcs01;
11000         cpu = get_cpu();
11001         vmx_vcpu_load(&vmx->vcpu, cpu);
11002         vmx->vcpu.cpu = cpu;
11003         vmx_vcpu_setup(vmx);
11004         vmx_vcpu_put(&vmx->vcpu);
11005         put_cpu();
11006         if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
11007                 err = alloc_apic_access_page(kvm);
11008                 if (err)
11009                         goto free_vmcs;
11010         }
11011
11012         if (enable_ept && !enable_unrestricted_guest) {
11013                 err = init_rmode_identity_map(kvm);
11014                 if (err)
11015                         goto free_vmcs;
11016         }
11017
11018         if (nested)
11019                 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
11020                                            kvm_vcpu_apicv_active(&vmx->vcpu));
11021
11022         vmx->nested.posted_intr_nv = -1;
11023         vmx->nested.current_vmptr = -1ull;
11024
11025         vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
11026
11027         /*
11028          * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
11029          * or POSTED_INTR_WAKEUP_VECTOR.
11030          */
11031         vmx->pi_desc.nv = POSTED_INTR_VECTOR;
11032         vmx->pi_desc.sn = 1;
11033
11034         return &vmx->vcpu;
11035
11036 free_vmcs:
11037         free_loaded_vmcs(vmx->loaded_vmcs);
11038 free_msrs:
11039         kfree(vmx->guest_msrs);
11040 free_pml:
11041         vmx_destroy_pml_buffer(vmx);
11042 uninit_vcpu:
11043         kvm_vcpu_uninit(&vmx->vcpu);
11044 free_vcpu:
11045         free_vpid(vmx->vpid);
11046         kmem_cache_free(kvm_vcpu_cache, vmx);
11047         return ERR_PTR(err);
11048 }
11049
11050 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
11051 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
11052
11053 static int vmx_vm_init(struct kvm *kvm)
11054 {
11055         spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
11056
11057         if (!ple_gap)
11058                 kvm->arch.pause_in_guest = true;
11059
11060         if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
11061                 switch (l1tf_mitigation) {
11062                 case L1TF_MITIGATION_OFF:
11063                 case L1TF_MITIGATION_FLUSH_NOWARN:
11064                         /* 'I explicitly don't care' is set */
11065                         break;
11066                 case L1TF_MITIGATION_FLUSH:
11067                 case L1TF_MITIGATION_FLUSH_NOSMT:
11068                 case L1TF_MITIGATION_FULL:
11069                         /*
11070                          * Warn upon starting the first VM in a potentially
11071                          * insecure environment.
11072                          */
11073                         if (cpu_smt_control == CPU_SMT_ENABLED)
11074                                 pr_warn_once(L1TF_MSG_SMT);
11075                         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
11076                                 pr_warn_once(L1TF_MSG_L1D);
11077                         break;
11078                 case L1TF_MITIGATION_FULL_FORCE:
11079                         /* Flush is enforced */
11080                         break;
11081                 }
11082         }
11083         return 0;
11084 }
11085
11086 static void __init vmx_check_processor_compat(void *rtn)
11087 {
11088         struct vmcs_config vmcs_conf;
11089
11090         *(int *)rtn = 0;
11091         if (setup_vmcs_config(&vmcs_conf) < 0)
11092                 *(int *)rtn = -EIO;
11093         nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, enable_apicv);
11094         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
11095                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
11096                                 smp_processor_id());
11097                 *(int *)rtn = -EIO;
11098         }
11099 }
11100
11101 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
11102 {
11103         u8 cache;
11104         u64 ipat = 0;
11105
11106         /* For the VT-d and EPT combination
11107          * 1. MMIO: always map as UC
11108          * 2. EPT with VT-d:
11109          *   a. VT-d without the snooping control feature: can't guarantee the
11110          *      result, try to trust the guest.
11111          *   b. VT-d with the snooping control feature: the snooping control of
11112          *      the VT-d engine guarantees cache correctness. Just set it
11113          *      to WB to keep it consistent with the host. So the same as item 3.
11114          * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep it
11115          *    consistent with the host MTRRs.
11116          */
11117         if (is_mmio) {
11118                 cache = MTRR_TYPE_UNCACHABLE;
11119                 goto exit;
11120         }
11121
11122         if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
11123                 ipat = VMX_EPT_IPAT_BIT;
11124                 cache = MTRR_TYPE_WRBACK;
11125                 goto exit;
11126         }
11127
11128         if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
11129                 ipat = VMX_EPT_IPAT_BIT;
11130                 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
11131                         cache = MTRR_TYPE_WRBACK;
11132                 else
11133                         cache = MTRR_TYPE_UNCACHABLE;
11134                 goto exit;
11135         }
11136
11137         cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
11138
11139 exit:
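        /*
         * The EPT memory type occupies bits 5:3 of the leaf EPT entry
         * (VMX_EPT_MT_EPTE_SHIFT); the IPAT bit makes the EPT type override
         * the guest's PAT.
         */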
11140         return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
11141 }
11142
11143 static int vmx_get_lpage_level(void)
11144 {
11145         if (enable_ept && !cpu_has_vmx_ept_1g_page())
11146                 return PT_DIRECTORY_LEVEL;
11147         else
11148                 /* Shadow paging, or EPT with 1GB page support */
11149                 return PT_PDPE_LEVEL;
11150 }
11151
11152 static void vmcs_set_secondary_exec_control(u32 new_ctl)
11153 {
11154         /*
11155          * These bits in the secondary execution controls field
11156          * are dynamic, the others are mostly based on the hypervisor
11157          * architecture and the guest's CPUID.  Do not touch the
11158          * dynamic bits.
11159          */
11160         u32 mask =
11161                 SECONDARY_EXEC_SHADOW_VMCS |
11162                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
11163                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
11164                 SECONDARY_EXEC_DESC;
11165
11166         u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
11167
11168         vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
11169                      (new_ctl & ~mask) | (cur_ctl & mask));
11170 }
11171
11172 /*
11173  * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
11174  * (indicating "allowed-1") if they are supported in the guest's CPUID.
11175  */
11176 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
11177 {
11178         struct vcpu_vmx *vmx = to_vmx(vcpu);
11179         struct kvm_cpuid_entry2 *entry;
11180
11181         vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
11182         vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
11183
11184 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {            \
11185         if (entry && (entry->_reg & (_cpuid_mask)))                     \
11186                 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);     \
11187 } while (0)
11188
11189         entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
11190         cr4_fixed1_update(X86_CR4_VME,        edx, bit(X86_FEATURE_VME));
11191         cr4_fixed1_update(X86_CR4_PVI,        edx, bit(X86_FEATURE_VME));
11192         cr4_fixed1_update(X86_CR4_TSD,        edx, bit(X86_FEATURE_TSC));
11193         cr4_fixed1_update(X86_CR4_DE,         edx, bit(X86_FEATURE_DE));
11194         cr4_fixed1_update(X86_CR4_PSE,        edx, bit(X86_FEATURE_PSE));
11195         cr4_fixed1_update(X86_CR4_PAE,        edx, bit(X86_FEATURE_PAE));
11196         cr4_fixed1_update(X86_CR4_MCE,        edx, bit(X86_FEATURE_MCE));
11197         cr4_fixed1_update(X86_CR4_PGE,        edx, bit(X86_FEATURE_PGE));
11198         cr4_fixed1_update(X86_CR4_OSFXSR,     edx, bit(X86_FEATURE_FXSR));
11199         cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
11200         cr4_fixed1_update(X86_CR4_VMXE,       ecx, bit(X86_FEATURE_VMX));
11201         cr4_fixed1_update(X86_CR4_SMXE,       ecx, bit(X86_FEATURE_SMX));
11202         cr4_fixed1_update(X86_CR4_PCIDE,      ecx, bit(X86_FEATURE_PCID));
11203         cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, bit(X86_FEATURE_XSAVE));
11204
11205         entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
11206         cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, bit(X86_FEATURE_FSGSBASE));
11207         cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
11208         cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
11209         cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
11210         cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
11211
11212 #undef cr4_fixed1_update
11213 }
11214
11215 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
11216 {
11217         struct vcpu_vmx *vmx = to_vmx(vcpu);
11218
11219         if (kvm_mpx_supported()) {
11220                 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
11221
11222                 if (mpx_enabled) {
11223                         vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
11224                         vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
11225                 } else {
11226                         vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
11227                         vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
11228                 }
11229         }
11230 }
11231
11232 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
11233 {
11234         struct vcpu_vmx *vmx = to_vmx(vcpu);
11235
11236         if (cpu_has_secondary_exec_ctrls()) {
11237                 vmx_compute_secondary_exec_control(vmx);
11238                 vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
11239         }
11240
11241         if (nested_vmx_allowed(vcpu))
11242                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
11243                         FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
11244         else
11245                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
11246                         ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
11247
11248         if (nested_vmx_allowed(vcpu)) {
11249                 nested_vmx_cr_fixed1_bits_update(vcpu);
11250                 nested_vmx_entry_exit_ctls_update(vcpu);
11251         }
11252 }
11253
11254 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
11255 {
11256         if (func == 1 && nested)
11257                 entry->ecx |= bit(X86_FEATURE_VMX);
11258 }
11259
11260 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
11261                 struct x86_exception *fault)
11262 {
11263         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11264         struct vcpu_vmx *vmx = to_vmx(vcpu);
11265         u32 exit_reason;
11266         unsigned long exit_qualification = vcpu->arch.exit_qualification;
11267
11268         if (vmx->nested.pml_full) {
11269                 exit_reason = EXIT_REASON_PML_FULL;
11270                 vmx->nested.pml_full = false;
11271                 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
11272         } else if (fault->error_code & PFERR_RSVD_MASK)
11273                 exit_reason = EXIT_REASON_EPT_MISCONFIG;
11274         else
11275                 exit_reason = EXIT_REASON_EPT_VIOLATION;
11276
11277         nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
11278         vmcs12->guest_physical_address = fault->address;
11279 }
11280
11281 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
11282 {
11283         return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
11284 }
11285
11286 /* Callbacks for nested_ept_init_mmu_context: */
11287
11288 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
11289 {
11290         /* return the page table to be shadowed - in our case, EPT12 */
11291         return get_vmcs12(vcpu)->ept_pointer;
11292 }
11293
11294 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
11295 {
11296         WARN_ON(mmu_is_nested(vcpu));
11297
11298         kvm_init_shadow_ept_mmu(vcpu,
11299                         to_vmx(vcpu)->nested.msrs.ept_caps &
11300                         VMX_EPT_EXECUTE_ONLY_BIT,
11301                         nested_ept_ad_enabled(vcpu),
11302                         nested_ept_get_cr3(vcpu));
11303         vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
11304         vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
11305         vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
11306
11307         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
11308 }
11309
11310 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
11311 {
11312         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
11313 }
11314
11315 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
11316                                             u16 error_code)
11317 {
11318         bool inequality, bit;
11319
11320         bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
11321         inequality =
11322                 (error_code & vmcs12->page_fault_error_code_mask) !=
11323                  vmcs12->page_fault_error_code_match;
11324         return inequality ^ bit;
11325 }
11326
11327 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
11328                 struct x86_exception *fault)
11329 {
11330         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11331
11332         WARN_ON(!is_guest_mode(vcpu));
11333
11334         if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
11335                 !to_vmx(vcpu)->nested.nested_run_pending) {
11336                 vmcs12->vm_exit_intr_error_code = fault->error_code;
11337                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
11338                                   PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
11339                                   INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
11340                                   fault->address);
11341         } else {
11342                 kvm_inject_page_fault(vcpu, fault);
11343         }
11344 }
11345
11346 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
11347                                                  struct vmcs12 *vmcs12);
11348
11349 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
11350 {
11351         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11352         struct vcpu_vmx *vmx = to_vmx(vcpu);
11353         struct page *page;
11354         u64 hpa;
11355
11356         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
11357                 /*
11358                  * Translate L1 physical address to host physical
11359                  * address for vmcs02. Keep the page pinned, so this
11360                  * physical address remains valid. We keep a reference
11361                  * to it so we can release it later.
11362                  */
11363                 if (vmx->nested.apic_access_page) { /* shouldn't happen */
11364                         kvm_release_page_dirty(vmx->nested.apic_access_page);
11365                         vmx->nested.apic_access_page = NULL;
11366                 }
11367                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
11368                 /*
11369                  * If translation failed, it does not matter: this feature asks
11370                  * to exit when accessing the given address, and if it
11371                  * can never be accessed, this feature won't do
11372                  * anything anyway.
11373                  */
11374                 if (!is_error_page(page)) {
11375                         vmx->nested.apic_access_page = page;
11376                         hpa = page_to_phys(vmx->nested.apic_access_page);
11377                         vmcs_write64(APIC_ACCESS_ADDR, hpa);
11378                 } else {
11379                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
11380                                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
11381                 }
11382         }
11383
11384         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
11385                 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
11386                         kvm_release_page_dirty(vmx->nested.virtual_apic_page);
11387                         vmx->nested.virtual_apic_page = NULL;
11388                 }
11389                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
11390
11391                 /*
11392                  * If translation failed, VM entry will fail because
11393                  * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
11394                  * Failing the vm entry is _not_ what the processor
11395                  * does but it's basically the only possibility we
11396                  * have.  We could still enter the guest if CR8 load
11397                  * exits are enabled, CR8 store exits are enabled, and
11398                  * virtualize APIC access is disabled; in this case
11399                  * the processor would never use the TPR shadow and we
11400                  * could simply clear the bit from the execution
11401                  * control.  But such a configuration is useless, so
11402                  * let's keep the code simple.
11403                  */
11404                 if (!is_error_page(page)) {
11405                         vmx->nested.virtual_apic_page = page;
11406                         hpa = page_to_phys(vmx->nested.virtual_apic_page);
11407                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
11408                 }
11409         }
11410
11411         if (nested_cpu_has_posted_intr(vmcs12)) {
11412                 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
11413                         kunmap(vmx->nested.pi_desc_page);
11414                         kvm_release_page_dirty(vmx->nested.pi_desc_page);
11415                         vmx->nested.pi_desc_page = NULL;
11416                 }
11417                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
11418                 if (is_error_page(page))
11419                         return;
11420                 vmx->nested.pi_desc_page = page;
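                /*
                 * The posted-interrupt descriptor need not be page aligned:
                 * point pi_desc at its offset within the mapped page and give
                 * vmcs02 the matching physical address (page frame plus the
                 * same sub-page offset).
                 */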
11421                 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
11422                 vmx->nested.pi_desc =
11423                         (struct pi_desc *)((void *)vmx->nested.pi_desc +
11424                         (unsigned long)(vmcs12->posted_intr_desc_addr &
11425                         (PAGE_SIZE - 1)));
11426                 vmcs_write64(POSTED_INTR_DESC_ADDR,
11427                         page_to_phys(vmx->nested.pi_desc_page) +
11428                         (unsigned long)(vmcs12->posted_intr_desc_addr &
11429                         (PAGE_SIZE - 1)));
11430         }
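        /*
         * Use the hardware MSR bitmap only if the L0 and L1 bitmaps were
         * merged successfully; otherwise clear CPU_BASED_USE_MSR_BITMAPS so
         * that every MSR access by L2 is intercepted.
         */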
11431         if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
11432                 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
11433                               CPU_BASED_USE_MSR_BITMAPS);
11434         else
11435                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
11436                                 CPU_BASED_USE_MSR_BITMAPS);
11437 }
11438
11439 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
11440 {
11441         u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
11442         struct vcpu_vmx *vmx = to_vmx(vcpu);
11443
11444         /*
11445          * A timer value of zero is architecturally guaranteed to cause
11446          * a VMExit prior to executing any instructions in the guest.
11447          */
11448         if (preemption_timeout == 0) {
11449                 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
11450                 return;
11451         }
11452
11453         if (vcpu->arch.virtual_tsc_khz == 0)
11454                 return;
11455
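        /*
         * The vmcs12 value is in units of 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE
         * TSC cycles; convert it to nanoseconds for the hrtimer:
         * ns = (value << rate) * 10^6 / virtual_tsc_khz.
         * Illustrative numbers only: rate 5, value 1000 and a 2 GHz virtual
         * TSC give 32000 * 10^6 / 2000000 = 16000 ns.
         */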
11456         preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
11457         preemption_timeout *= 1000000;
11458         do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
11459         hrtimer_start(&vmx->nested.preemption_timer,
11460                       ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
11461 }
11462
11463 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
11464                                                struct vmcs12 *vmcs12)
11465 {
11466         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
11467                 return 0;
11468
11469         if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
11470             !page_address_valid(vcpu, vmcs12->io_bitmap_b))
11471                 return -EINVAL;
11472
11473         return 0;
11474 }
11475
11476 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
11477                                                 struct vmcs12 *vmcs12)
11478 {
11479         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
11480                 return 0;
11481
11482         if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
11483                 return -EINVAL;
11484
11485         return 0;
11486 }
11487
11488 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
11489                                                 struct vmcs12 *vmcs12)
11490 {
11491         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
11492                 return 0;
11493
11494         if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
11495                 return -EINVAL;
11496
11497         return 0;
11498 }
11499
11500 /*
11501  * Merge L0's and L1's MSR bitmaps; return false to indicate that
11502  * we do not use the hardware bitmap.
11503  */
11504 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
11505                                                  struct vmcs12 *vmcs12)
11506 {
11507         int msr;
11508         struct page *page;
11509         unsigned long *msr_bitmap_l1;
11510         unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
11511         /*
11512          * pred_cmd & spec_ctrl are trying to verify two things:
11513          *
11514          * 1. L0 gave L1 permission to actually pass through the MSR. This
11515          *    ensures that we do not accidentally generate an L02 MSR bitmap
11516          *    from the L12 MSR bitmap that is too permissive.
11517          * 2. That L1 or L2s have actually used the MSR. This avoids
11518          *    unnecessary merging of the bitmap if the MSR is unused. This
11519          *    works properly because we only update the L01 MSR bitmap lazily.
11520          *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
11521          *    updated to reflect this when L1 (or its L2s) actually write to
11522          *    the MSR.
11523          */
11524         bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
11525         bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
11526
11527         /* Nothing to do if the MSR bitmap is not in use.  */
11528         if (!cpu_has_vmx_msr_bitmap() ||
11529             !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
11530                 return false;
11531
11532         if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11533             !pred_cmd && !spec_ctrl)
11534                 return false;
11535
11536         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
11537         if (is_error_page(page))
11538                 return false;
11539
11540         msr_bitmap_l1 = (unsigned long *)kmap(page);
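        /*
         * In the hardware MSR bitmap page, the read bitmap for MSRs
         * 0x0-0x1fff starts at byte offset 0 and the corresponding write
         * bitmap at byte offset 0x800; hence the extra 0x800 / sizeof(long)
         * longs when indexing the write half below.
         */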
11541         if (nested_cpu_has_apic_reg_virt(vmcs12)) {
11542                 /*
11543                  * L0 need not intercept reads for MSRs between 0x800 and 0x8ff;
11544                  * it just lets the processor take the value from the virtual-APIC
11545                  * page.  Take those 256 bits directly from the L1 bitmap.
11546                  */
11547                 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
11548                         unsigned word = msr / BITS_PER_LONG;
11549                         msr_bitmap_l0[word] = msr_bitmap_l1[word];
11550                         msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
11551                 }
11552         } else {
11553                 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
11554                         unsigned word = msr / BITS_PER_LONG;
11555                         msr_bitmap_l0[word] = ~0;
11556                         msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
11557                 }
11558         }
11559
11560         nested_vmx_disable_intercept_for_msr(
11561                 msr_bitmap_l1, msr_bitmap_l0,
11562                 X2APIC_MSR(APIC_TASKPRI),
11563                 MSR_TYPE_W);
11564
11565         if (nested_cpu_has_vid(vmcs12)) {
11566                 nested_vmx_disable_intercept_for_msr(
11567                         msr_bitmap_l1, msr_bitmap_l0,
11568                         X2APIC_MSR(APIC_EOI),
11569                         MSR_TYPE_W);
11570                 nested_vmx_disable_intercept_for_msr(
11571                         msr_bitmap_l1, msr_bitmap_l0,
11572                         X2APIC_MSR(APIC_SELF_IPI),
11573                         MSR_TYPE_W);
11574         }
11575
11576         if (spec_ctrl)
11577                 nested_vmx_disable_intercept_for_msr(
11578                                         msr_bitmap_l1, msr_bitmap_l0,
11579                                         MSR_IA32_SPEC_CTRL,
11580                                         MSR_TYPE_R | MSR_TYPE_W);
11581
11582         if (pred_cmd)
11583                 nested_vmx_disable_intercept_for_msr(
11584                                         msr_bitmap_l1, msr_bitmap_l0,
11585                                         MSR_IA32_PRED_CMD,
11586                                         MSR_TYPE_W);
11587
11588         kunmap(page);
11589         kvm_release_page_clean(page);
11590
11591         return true;
11592 }
11593
11594 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
11595                                        struct vmcs12 *vmcs12)
11596 {
11597         struct vmcs12 *shadow;
11598         struct page *page;
11599
11600         if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
11601             vmcs12->vmcs_link_pointer == -1ull)
11602                 return;
11603
11604         shadow = get_shadow_vmcs12(vcpu);
11605         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
11606
11607         memcpy(shadow, kmap(page), VMCS12_SIZE);
11608
11609         kunmap(page);
11610         kvm_release_page_clean(page);
11611 }
11612
11613 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
11614                                               struct vmcs12 *vmcs12)
11615 {
11616         struct vcpu_vmx *vmx = to_vmx(vcpu);
11617
11618         if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
11619             vmcs12->vmcs_link_pointer == -1ull)
11620                 return;
11621
11622         kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
11623                         get_shadow_vmcs12(vcpu), VMCS12_SIZE);
11624 }
11625
11626 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
11627                                           struct vmcs12 *vmcs12)
11628 {
11629         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
11630             !page_address_valid(vcpu, vmcs12->apic_access_addr))
11631                 return -EINVAL;
11632         else
11633                 return 0;
11634 }
11635
11636 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
11637                                            struct vmcs12 *vmcs12)
11638 {
11639         if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11640             !nested_cpu_has_apic_reg_virt(vmcs12) &&
11641             !nested_cpu_has_vid(vmcs12) &&
11642             !nested_cpu_has_posted_intr(vmcs12))
11643                 return 0;
11644
11645         /*
11646          * If virtualize x2apic mode is enabled,
11647          * virtualize apic access must be disabled.
11648          */
11649         if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11650             nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
11651                 return -EINVAL;
11652
11653         /*
11654          * If virtual interrupt delivery is enabled,
11655          * we must exit on external interrupts.
11656          */
11657         if (nested_cpu_has_vid(vmcs12) &&
11658            !nested_exit_on_intr(vcpu))
11659                 return -EINVAL;
11660
11661         /*
11662          * Bits 15:8 should be zero in posted_intr_nv;
11663          * the descriptor address has already been checked
11664          * in nested_get_vmcs12_pages.
11665          *
11666          * bits 5:0 of posted_intr_desc_addr should be zero.
11667          */
11668         if (nested_cpu_has_posted_intr(vmcs12) &&
11669            (!nested_cpu_has_vid(vmcs12) ||
11670             !nested_exit_intr_ack_set(vcpu) ||
11671             (vmcs12->posted_intr_nv & 0xff00) ||
11672             (vmcs12->posted_intr_desc_addr & 0x3f) ||
11673             (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
11674                 return -EINVAL;
11675
11676         /* tpr shadow is needed by all apicv features. */
11677         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
11678                 return -EINVAL;
11679
11680         return 0;
11681 }
11682
11683 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
11684                                        unsigned long count_field,
11685                                        unsigned long addr_field)
11686 {
11687         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11688         int maxphyaddr;
11689         u64 count, addr;
11690
11691         if (vmcs12_read_any(vmcs12, count_field, &count) ||
11692             vmcs12_read_any(vmcs12, addr_field, &addr)) {
11693                 WARN_ON(1);
11694                 return -EINVAL;
11695         }
11696         if (count == 0)
11697                 return 0;
11698         maxphyaddr = cpuid_maxphyaddr(vcpu);
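        /*
         * The switch area must be 16-byte aligned and the whole array
         * (count entries of 16 bytes each) must lie below the guest's
         * physical-address width.
         */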
11699         if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
11700             (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
11701                 pr_debug_ratelimited(
11702                         "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
11703                         addr_field, maxphyaddr, count, addr);
11704                 return -EINVAL;
11705         }
11706         return 0;
11707 }
11708
11709 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
11710                                                 struct vmcs12 *vmcs12)
11711 {
11712         if (vmcs12->vm_exit_msr_load_count == 0 &&
11713             vmcs12->vm_exit_msr_store_count == 0 &&
11714             vmcs12->vm_entry_msr_load_count == 0)
11715                 return 0; /* Fast path */
11716         if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
11717                                         VM_EXIT_MSR_LOAD_ADDR) ||
11718             nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
11719                                         VM_EXIT_MSR_STORE_ADDR) ||
11720             nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
11721                                         VM_ENTRY_MSR_LOAD_ADDR))
11722                 return -EINVAL;
11723         return 0;
11724 }
11725
11726 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
11727                                          struct vmcs12 *vmcs12)
11728 {
11729         if (!nested_cpu_has_pml(vmcs12))
11730                 return 0;
11731
11732         if (!nested_cpu_has_ept(vmcs12) ||
11733             !page_address_valid(vcpu, vmcs12->pml_address))
11734                 return -EINVAL;
11735
11736         return 0;
11737 }
11738
11739 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
11740                                                  struct vmcs12 *vmcs12)
11741 {
11742         if (!nested_cpu_has_shadow_vmcs(vmcs12))
11743                 return 0;
11744
11745         if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
11746             !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
11747                 return -EINVAL;
11748
11749         return 0;
11750 }
11751
11752 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
11753                                        struct vmx_msr_entry *e)
11754 {
11755         /* x2APIC MSR accesses are not allowed */
11756         if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
11757                 return -EINVAL;
11758         if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
11759             e->index == MSR_IA32_UCODE_REV)
11760                 return -EINVAL;
11761         if (e->reserved != 0)
11762                 return -EINVAL;
11763         return 0;
11764 }
11765
11766 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
11767                                      struct vmx_msr_entry *e)
11768 {
11769         if (e->index == MSR_FS_BASE ||
11770             e->index == MSR_GS_BASE ||
11771             e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
11772             nested_vmx_msr_check_common(vcpu, e))
11773                 return -EINVAL;
11774         return 0;
11775 }
11776
11777 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
11778                                       struct vmx_msr_entry *e)
11779 {
11780         if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
11781             nested_vmx_msr_check_common(vcpu, e))
11782                 return -EINVAL;
11783         return 0;
11784 }
11785
11786 /*
11787  * Load the guest's/host's MSRs at nested entry/exit.
11788  * Return 0 on success, or the (1-based) index of the failing entry on failure.
11789  */
11790 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
11791 {
11792         u32 i;
11793         struct vmx_msr_entry e;
11794         struct msr_data msr;
11795
11796         msr.host_initiated = false;
11797         for (i = 0; i < count; i++) {
11798                 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
11799                                         &e, sizeof(e))) {
11800                         pr_debug_ratelimited(
11801                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
11802                                 __func__, i, gpa + i * sizeof(e));
11803                         goto fail;
11804                 }
11805                 if (nested_vmx_load_msr_check(vcpu, &e)) {
11806                         pr_debug_ratelimited(
11807                                 "%s check failed (%u, 0x%x, 0x%x)\n",
11808                                 __func__, i, e.index, e.reserved);
11809                         goto fail;
11810                 }
11811                 msr.index = e.index;
11812                 msr.data = e.value;
11813                 if (kvm_set_msr(vcpu, &msr)) {
11814                         pr_debug_ratelimited(
11815                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
11816                                 __func__, i, e.index, e.value);
11817                         goto fail;
11818                 }
11819         }
11820         return 0;
11821 fail:
11822         return i + 1;
11823 }
11824
11825 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
11826 {
11827         u32 i;
11828         struct vmx_msr_entry e;
11829
11830         for (i = 0; i < count; i++) {
11831                 struct msr_data msr_info;
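                /*
                 * Only the index and reserved fields are needed here, hence
                 * the 2 * sizeof(u32) read; the value is taken from the vCPU
                 * below and written back to guest memory.
                 */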
11832                 if (kvm_vcpu_read_guest(vcpu,
11833                                         gpa + i * sizeof(e),
11834                                         &e, 2 * sizeof(u32))) {
11835                         pr_debug_ratelimited(
11836                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
11837                                 __func__, i, gpa + i * sizeof(e));
11838                         return -EINVAL;
11839                 }
11840                 if (nested_vmx_store_msr_check(vcpu, &e)) {
11841                         pr_debug_ratelimited(
11842                                 "%s check failed (%u, 0x%x, 0x%x)\n",
11843                                 __func__, i, e.index, e.reserved);
11844                         return -EINVAL;
11845                 }
11846                 msr_info.host_initiated = false;
11847                 msr_info.index = e.index;
11848                 if (kvm_get_msr(vcpu, &msr_info)) {
11849                         pr_debug_ratelimited(
11850                                 "%s cannot read MSR (%u, 0x%x)\n",
11851                                 __func__, i, e.index);
11852                         return -EINVAL;
11853                 }
11854                 if (kvm_vcpu_write_guest(vcpu,
11855                                          gpa + i * sizeof(e) +
11856                                              offsetof(struct vmx_msr_entry, value),
11857                                          &msr_info.data, sizeof(msr_info.data))) {
11858                         pr_debug_ratelimited(
11859                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
11860                                 __func__, i, e.index, msr_info.data);
11861                         return -EINVAL;
11862                 }
11863         }
11864         return 0;
11865 }
11866
11867 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
11868 {
11869         unsigned long invalid_mask;
11870
11871         invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
11872         return (val & invalid_mask) == 0;
11873 }
11874
11875 /*
11876  * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
11877  * emulating VM entry into a guest with EPT enabled.
11878  * Returns 0 on success, 1 on failure. On failure, the invalid-state exit
11879  * qualification code is stored in *entry_failure_code.
11880  */
11881 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
11882                                u32 *entry_failure_code)
11883 {
11884         if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
11885                 if (!nested_cr3_valid(vcpu, cr3)) {
11886                         *entry_failure_code = ENTRY_FAIL_DEFAULT;
11887                         return 1;
11888                 }
11889
11890                 /*
11891                  * If PAE paging and EPT are both on, CR3 is not used by the CPU and
11892                  * must not be dereferenced.
11893                  */
11894                 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
11895                     !nested_ept) {
11896                         if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
11897                                 *entry_failure_code = ENTRY_FAIL_PDPTE;
11898                                 return 1;
11899                         }
11900                 }
11901         }
11902
11903         if (!nested_ept)
11904                 kvm_mmu_new_cr3(vcpu, cr3, false);
11905
11906         vcpu->arch.cr3 = cr3;
11907         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
11908
11909         kvm_init_mmu(vcpu, false);
11910
11911         return 0;
11912 }
11913
11914 /*
11915  * Returns true if KVM is able to configure the CPU to tag TLB entries
11916  * populated by L2 differently than TLB entries populated
11917  * by L1.
11918  *
11919  * If L1 uses EPT, then TLB entries are tagged with different EPTP.
11920  *
11921  * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
11922  * with different VPID (L1 entries are tagged with vmx->vpid
11923  * while L2 entries are tagged with vmx->nested.vpid02).
11924  */
11925 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
11926 {
11927         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
11928
11929         return nested_cpu_has_ept(vmcs12) ||
11930                (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
11931 }
11932
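/*
 * Compute the EFER L2 will run with: take it from vmcs12 if this entry loads
 * IA32_EFER, otherwise start from L1's EFER and force LMA/LME to match the
 * "IA-32e mode guest" VM-entry control.
 */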
11933 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
11934 {
11935         if (vmx->nested.nested_run_pending &&
11936             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
11937                 return vmcs12->guest_ia32_efer;
11938         else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
11939                 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
11940         else
11941                 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
11942 }
11943
11944 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
11945 {
11946         /*
11947          * If vmcs02 hasn't been initialized, set the constant vmcs02 state
11948          * according to L0's settings (vmcs12 is irrelevant here).  Host
11949          * fields that come from L0 and are not constant, e.g. HOST_CR3,
11950          * will be set as needed prior to VMLAUNCH/VMRESUME.
11951          */
11952         if (vmx->nested.vmcs02_initialized)
11953                 return;
11954         vmx->nested.vmcs02_initialized = true;
11955
11956         /* All VMFUNCs are currently emulated through L0 vmexits.  */
11957         if (cpu_has_vmx_vmfunc())
11958                 vmcs_write64(VM_FUNCTION_CONTROL, 0);
11959
11960         if (cpu_has_vmx_posted_intr())
11961                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
11962
11963         if (cpu_has_vmx_msr_bitmap())
11964                 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
11965
11966         if (enable_pml)
11967                 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
11968
11969         /*
11970          * Set the MSR load/store lists to match L0's settings.  Only the
11971          * addresses are constant (for vmcs02), the counts can change based
11972          * on L2's behavior, e.g. switching to/from long mode.
11973          */
11974         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
11975         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
11976         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
11977
11978         vmx_set_constant_host_state(vmx);
11979 }
11980
11981 static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
11982                                       struct vmcs12 *vmcs12)
11983 {
11984         prepare_vmcs02_constant_state(vmx);
11985
11986         vmcs_write64(VMCS_LINK_POINTER, -1ull);
11987
11988         if (enable_vpid) {
11989                 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
11990                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
11991                 else
11992                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
11993         }
11994 }
11995
11996 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
11997 {
11998         u32 exec_control, vmcs12_exec_ctrl;
11999         u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
12000
12001         if (vmx->nested.dirty_vmcs12)
12002                 prepare_vmcs02_early_full(vmx, vmcs12);
12003
12004         /*
12005          * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
12006          * entry, but only if the current (host) sp changed from the value
12007          * we wrote last (vmx->host_rsp).  This cache is no longer relevant
12008          * if we switch vmcs, and rather than hold a separate cache per vmcs,
12009          * here we just force the write to happen on entry.
12010          */
12011         vmx->host_rsp = 0;
12012
12013         /*
12014          * PIN CONTROLS
12015          */
12016         exec_control = vmcs12->pin_based_vm_exec_control;
12017
12018         /* Preemption timer setting is computed directly in vmx_vcpu_run.  */
12019         exec_control |= vmcs_config.pin_based_exec_ctrl;
12020         exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
12021         vmx->loaded_vmcs->hv_timer_armed = false;
12022
12023         /* Posted interrupts setting is only taken from vmcs12.  */
12024         if (nested_cpu_has_posted_intr(vmcs12)) {
12025                 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
12026                 vmx->nested.pi_pending = false;
12027         } else {
12028                 exec_control &= ~PIN_BASED_POSTED_INTR;
12029         }
12030         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
12031
12032         /*
12033          * EXEC CONTROLS
12034          */
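        /*
         * The primary processor-based controls become the union of L0's and
         * L1's requests; the interrupt/NMI-window and TPR-shadow bits are
         * cleared from L0's set first, so for those bits only L1's settings
         * survive the merge.
         */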
12035         exec_control = vmx_exec_control(vmx); /* L0's desires */
12036         exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
12037         exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
12038         exec_control &= ~CPU_BASED_TPR_SHADOW;
12039         exec_control |= vmcs12->cpu_based_vm_exec_control;
12040
12041         /*
12042          * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
12043          * nested_get_vmcs12_pages can't fix it up, the illegal value
12044          * will result in a VM entry failure.
12045          */
12046         if (exec_control & CPU_BASED_TPR_SHADOW) {
12047                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
12048                 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
12049         } else {
12050 #ifdef CONFIG_X86_64
12051                 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
12052                                 CPU_BASED_CR8_STORE_EXITING;
12053 #endif
12054         }
12055
12056         /*
12057          * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
12058          * for I/O port accesses.
12059          */
12060         exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
12061         exec_control |= CPU_BASED_UNCOND_IO_EXITING;
12062         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
12063
12064         /*
12065          * SECONDARY EXEC CONTROLS
12066          */
12067         if (cpu_has_secondary_exec_ctrls()) {
12068                 exec_control = vmx->secondary_exec_control;
12069
12070                 /* Take the following fields only from vmcs12 */
12071                 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
12072                                   SECONDARY_EXEC_ENABLE_INVPCID |
12073                                   SECONDARY_EXEC_RDTSCP |
12074                                   SECONDARY_EXEC_XSAVES |
12075                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
12076                                   SECONDARY_EXEC_APIC_REGISTER_VIRT |
12077                                   SECONDARY_EXEC_ENABLE_VMFUNC);
12078                 if (nested_cpu_has(vmcs12,
12079                                    CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
12080                         vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
12081                                 ~SECONDARY_EXEC_ENABLE_PML;
12082                         exec_control |= vmcs12_exec_ctrl;
12083                 }
12084
12085                 /* VMCS shadowing for L2 is emulated for now */
12086                 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
12087
12088                 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
12089                         vmcs_write16(GUEST_INTR_STATUS,
12090                                 vmcs12->guest_intr_status);
12091
12092                 /*
12093                  * Write an illegal value to APIC_ACCESS_ADDR. Later,
12094                  * nested_get_vmcs12_pages will either fix it up or
12095                  * remove the VM execution control.
12096                  */
12097                 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
12098                         vmcs_write64(APIC_ACCESS_ADDR, -1ull);
12099
12100                 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
12101                         vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
12102
12103                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
12104         }
12105
12106         /*
12107          * ENTRY CONTROLS
12108          *
12109          * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
12110          * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
12111          * on the related bits (if supported by the CPU) in the hope that
12112          * we can avoid VMWrites during vmx_set_efer().
12113          */
12114         exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) &
12115                         ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
12116         if (cpu_has_load_ia32_efer) {
12117                 if (guest_efer & EFER_LMA)
12118                         exec_control |= VM_ENTRY_IA32E_MODE;
12119                 if (guest_efer != host_efer)
12120                         exec_control |= VM_ENTRY_LOAD_IA32_EFER;
12121         }
12122         vm_entry_controls_init(vmx, exec_control);
12123
12124         /*
12125          * EXIT CONTROLS
12126          *
12127          * L2->L1 exit controls are emulated - the hardware exit is to L0 so
12128          * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
12129          * bits may be modified by vmx_set_efer() in prepare_vmcs02().
12130          */
12131         exec_control = vmcs_config.vmexit_ctrl;
12132         if (cpu_has_load_ia32_efer && guest_efer != host_efer)
12133                 exec_control |= VM_EXIT_LOAD_IA32_EFER;
12134         vm_exit_controls_init(vmx, exec_control);
12135
12136         /*
12137          * Conceptually we want to copy the PML address and index from
12138          * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
12139          * since we always flush the log on each vmexit and never change
12140          * the PML address (once set), this happens to be equivalent to
12141          * simply resetting the index in vmcs02.
12142          */
12143         if (enable_pml)
12144                 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
12145
12146         /*
12147          * Interrupt/Exception Fields
12148          */
12149         if (vmx->nested.nested_run_pending) {
12150                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
12151                              vmcs12->vm_entry_intr_info_field);
12152                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
12153                              vmcs12->vm_entry_exception_error_code);
12154                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
12155                              vmcs12->vm_entry_instruction_len);
12156                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
12157                              vmcs12->guest_interruptibility_info);
12158                 vmx->loaded_vmcs->nmi_known_unmasked =
12159                         !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
12160         } else {
12161                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
12162         }
12163 }
12164
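/*
 * Copy the less frequently modified guest-state fields from vmcs12 into
 * vmcs02.  This runs only when vmcs12 is marked dirty; the frequently
 * written (shadowed) fields are handled in prepare_vmcs02().
 */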
12165 static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
12166 {
12167         vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
12168         vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
12169         vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
12170         vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
12171         vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
12172         vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
12173         vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
12174         vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
12175         vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
12176         vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
12177         vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
12178         vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
12179         vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
12180         vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
12181         vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
12182         vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
12183         vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
12184         vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
12185         vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
12186         vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
12187         vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
12188         vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
12189         vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
12190         vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
12191         vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
12192         vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
12193         vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
12194         vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
12195         vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
12196         vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
12197         vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
12198
12199         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
12200         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
12201                 vmcs12->guest_pending_dbg_exceptions);
12202         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
12203         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
12204
12205         if (nested_cpu_has_xsaves(vmcs12))
12206                 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
12207
12208         /*
12209          * Whether page-faults are trapped is determined by a combination of
12210          * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
12211          * If enable_ept, L0 doesn't care about page faults and we should
12212          * set all of these to L1's desires. However, if !enable_ept, L0 does
12213          * care about (at least some) page faults, and because it is not easy
12214          * (if at all possible?) to merge L0 and L1's desires, we simply ask
12215          * to exit on each and every L2 page fault. This is done by setting
12216          * MASK=MATCH=0 and (see below) EB.PF=1.
12217          * Note that below we don't need special code to set EB.PF beyond the
12218          * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
12219          * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
12220          * !enable_ept, EB.PF is 1, so the "or" will always be 1.
12221          */
12222         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
12223                 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
12224         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
12225                 enable_ept ? vmcs12->page_fault_error_code_match : 0);
12226
12227         if (cpu_has_vmx_apicv()) {
12228                 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
12229                 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
12230                 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
12231                 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
12232         }
12233
12234         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
12235         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
12236
12237         set_cr4_guest_host_mask(vmx);
12238
12239         if (kvm_mpx_supported()) {
12240                 if (vmx->nested.nested_run_pending &&
12241                         (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
12242                         vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
12243                 else
12244                         vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
12245         }
12246
12247         /*
12248          * L1 may access L2's PDPTRs, so save them to construct vmcs12
12249          */
12250         if (enable_ept) {
12251                 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
12252                 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
12253                 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
12254                 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
12255         }
12256 }
12257
12258 /*
12259  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
12260  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
12261  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
12262  * guest in a way that will be appropriate both to L1's requests and to our
12263  * needs. In addition to modifying the active vmcs (which is vmcs02), this
12264  * function also has necessary side effects, such as setting various
12265  * vcpu->arch fields.
12266  * Returns 0 on success, 1 on failure. On failure, the invalid-state exit
12267  * qualification code is stored in *entry_failure_code.
12268  */
12269 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
12270                           u32 *entry_failure_code)
12271 {
12272         struct vcpu_vmx *vmx = to_vmx(vcpu);
12273
12274         if (vmx->nested.dirty_vmcs12) {
12275                 prepare_vmcs02_full(vmx, vmcs12);
12276                 vmx->nested.dirty_vmcs12 = false;
12277         }
12278
12279         /*
12280          * First, the fields that are shadowed.  This must be kept in sync
12281          * with vmx_shadow_fields.h.
12282          */
12283
12284         vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
12285         vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
12286         vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
12287         vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
12288         vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
12289
12290         if (vmx->nested.nested_run_pending &&
12291             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
12292                 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
12293                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
12294         } else {
12295                 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
12296                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
12297         }
12298         vmx_set_rflags(vcpu, vmcs12->guest_rflags);
12299
12300         vmx->nested.preemption_timer_expired = false;
12301         if (nested_cpu_has_preemption_timer(vmcs12))
12302                 vmx_start_preemption_timer(vcpu);
12303
12304         /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
12305          * bitwise-or of what L1 wants to trap for L2, and what we want to
12306          * trap. Note that CR0.TS also needs updating - we do this later.
12307          */
12308         update_exception_bitmap(vcpu);
12309         vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
12310         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
12311
12312         if (vmx->nested.nested_run_pending &&
12313             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
12314                 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
12315                 vcpu->arch.pat = vmcs12->guest_ia32_pat;
12316         } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
12317                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
12318         }
12319
12320         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
12321
12322         if (kvm_has_tsc_control)
12323                 decache_tsc_multiplier(vmx);
12324
12325         if (enable_vpid) {
12326                 /*
12327                  * There is no direct mapping between vpid02 and vpid12; the
12328                  * vpid02 is per-vCPU for L0 and is reused whenever the value of
12329                  * vpid12 changes, with one INVVPID during nested vmentry.
12330                  * The vpid12 is allocated by L1 for L2, so it does not
12331                  * influence the global bitmap (for vpid01 and vpid02 allocation)
12332                  * even if L1 spawns a lot of nested vCPUs.
12333                  */
12334                 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
12335                         if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
12336                                 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
12337                                 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
12338                         }
12339                 } else {
12340                         /*
12341                          * If L1 uses EPT, then L0 needs to execute INVEPT on
12342                          * EPTP02 instead of EPTP01. Therefore, delay the TLB
12343                          * flush until vmcs02->eptp is fully updated by
12344                          * KVM_REQ_LOAD_CR3. Note that this assumes
12345                          * KVM_REQ_TLB_FLUSH is evaluated after
12346                          * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
12347                          */
12348                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
12349                 }
12350         }
12351
12352         if (nested_cpu_has_ept(vmcs12))
12353                 nested_ept_init_mmu_context(vcpu);
12354         else if (nested_cpu_has2(vmcs12,
12355                                  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
12356                 vmx_flush_tlb(vcpu, true);
12357
12358         /*
12359          * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
12360          * bits which we consider mandatory to enable.
12361          * The CR0_READ_SHADOW is what L2 should have expected to read given
12362          * the specifications by L1; it's not enough to take
12363          * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
12364          * cover more bits than L1 expected.
12365          */
12366         vmx_set_cr0(vcpu, vmcs12->guest_cr0);
12367         vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
12368
12369         vmx_set_cr4(vcpu, vmcs12->guest_cr4);
12370         vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
12371
12372         vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
12373         /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
12374         vmx_set_efer(vcpu, vcpu->arch.efer);
12375
12376         /*
12377          * Guest state is invalid and unrestricted guest is disabled,
12378          * which means L1 attempted VMEntry to L2 with invalid state.
12379          * Fail the VMEntry.
12380          */
12381         if (vmx->emulation_required) {
12382                 *entry_failure_code = ENTRY_FAIL_DEFAULT;
12383                 return 1;
12384         }
12385
12386         /* Load the L2 CR3: paging for L2 uses either EPT or shadow page tables. */
12387         if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
12388                                 entry_failure_code))
12389                 return 1;
12390
12391         if (!enable_ept)
12392                 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
12393
12394         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
12395         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
12396         return 0;
12397 }
12398
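/*
 * Consistency checks for the NMI controls: virtual NMIs require NMI exiting,
 * and NMI-window exiting requires virtual NMIs.
 */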
12399 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
12400 {
12401         if (!nested_cpu_has_nmi_exiting(vmcs12) &&
12402             nested_cpu_has_virtual_nmis(vmcs12))
12403                 return -EINVAL;
12404
12405         if (!nested_cpu_has_virtual_nmis(vmcs12) &&
12406             nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
12407                 return -EINVAL;
12408
12409         return 0;
12410 }
12411
12412 static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12413 {
12414         struct vcpu_vmx *vmx = to_vmx(vcpu);
12415         bool ia32e;
12416
12417         if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
12418             vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
12419                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12420
12421         if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
12422                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12423
12424         if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
12425                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12426
12427         if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
12428                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12429
12430         if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
12431                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12432
12433         if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
12434                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12435
12436         if (nested_vmx_check_apicv_controls(vcpu, vmcs12))
12437                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12438
12439         if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
12440                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12441
12442         if (nested_vmx_check_pml_controls(vcpu, vmcs12))
12443                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12444
12445         if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12))
12446                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12447
12448         if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
12449                                 vmx->nested.msrs.procbased_ctls_low,
12450                                 vmx->nested.msrs.procbased_ctls_high) ||
12451             (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
12452              !vmx_control_verify(vmcs12->secondary_vm_exec_control,
12453                                  vmx->nested.msrs.secondary_ctls_low,
12454                                  vmx->nested.msrs.secondary_ctls_high)) ||
12455             !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
12456                                 vmx->nested.msrs.pinbased_ctls_low,
12457                                 vmx->nested.msrs.pinbased_ctls_high) ||
12458             !vmx_control_verify(vmcs12->vm_exit_controls,
12459                                 vmx->nested.msrs.exit_ctls_low,
12460                                 vmx->nested.msrs.exit_ctls_high) ||
12461             !vmx_control_verify(vmcs12->vm_entry_controls,
12462                                 vmx->nested.msrs.entry_ctls_low,
12463                                 vmx->nested.msrs.entry_ctls_high))
12464                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12465
12466         if (nested_vmx_check_nmi_controls(vmcs12))
12467                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12468
12469         if (nested_cpu_has_vmfunc(vmcs12)) {
12470                 if (vmcs12->vm_function_control &
12471                     ~vmx->nested.msrs.vmfunc_controls)
12472                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12473
12474                 if (nested_cpu_has_eptp_switching(vmcs12)) {
12475                         if (!nested_cpu_has_ept(vmcs12) ||
12476                             !page_address_valid(vcpu, vmcs12->eptp_list_address))
12477                                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12478                 }
12479         }
12480
12481         if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
12482                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12483
12484         if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
12485             !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
12486             !nested_cr3_valid(vcpu, vmcs12->host_cr3))
12487                 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
12488
12489         /*
12490          * If the load IA32_EFER VM-exit control is 1, bits reserved in the
12491          * IA32_EFER MSR must be 0 in the field for that register. In addition,
12492          * the values of the LMA and LME bits in the field must each be that of
12493          * the host address-space size VM-exit control.
12494          */
12495         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
12496                 ia32e = (vmcs12->vm_exit_controls &
12497                          VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
12498                 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
12499                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
12500                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
12501                         return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
12502         }
12503
12504         /*
12505          * From the Intel SDM, volume 3:
12506          * Fields relevant to VM-entry event injection must be set properly.
12507          * These fields are the VM-entry interruption-information field, the
12508          * VM-entry exception error code, and the VM-entry instruction length.
12509          */
12510         if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
12511                 u32 intr_info = vmcs12->vm_entry_intr_info_field;
12512                 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
12513                 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
12514                 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
12515                 bool should_have_error_code;
12516                 bool urg = nested_cpu_has2(vmcs12,
12517                                            SECONDARY_EXEC_UNRESTRICTED_GUEST);
12518                 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
12519
12520                 /* VM-entry interruption-info field: interruption type */
12521                 if (intr_type == INTR_TYPE_RESERVED ||
12522                     (intr_type == INTR_TYPE_OTHER_EVENT &&
12523                      !nested_cpu_supports_monitor_trap_flag(vcpu)))
12524                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12525
12526                 /* VM-entry interruption-info field: vector */
12527                 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
12528                     (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
12529                     (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
12530                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12531
12532                 /* VM-entry interruption-info field: deliver error code */
12533                 should_have_error_code =
12534                         intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
12535                         x86_exception_has_error_code(vector);
12536                 if (has_error_code != should_have_error_code)
12537                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12538
12539                 /* VM-entry exception error code */
12540                 if (has_error_code &&
12541                     vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
12542                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12543
12544                 /* VM-entry interruption-info field: reserved bits */
12545                 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
12546                         return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12547
12548                 /* VM-entry instruction length */
12549                 switch (intr_type) {
12550                 case INTR_TYPE_SOFT_EXCEPTION:
12551                 case INTR_TYPE_SOFT_INTR:
12552                 case INTR_TYPE_PRIV_SW_EXCEPTION:
12553                         if ((vmcs12->vm_entry_instruction_len > 15) ||
12554                             (vmcs12->vm_entry_instruction_len == 0 &&
12555                              !nested_cpu_has_zero_length_injection(vcpu)))
12556                                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12557                 }
12558         }
12559
12560         if (nested_cpu_has_ept(vmcs12) &&
12561             !valid_ept_address(vcpu, vmcs12->ept_pointer))
12562                 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12563
12564         return 0;
12565 }
12566
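/*
 * If vmcs12 uses a VMCS link pointer (i.e. it is not -1ull), verify that it
 * points to a valid guest page whose header carries VMCS12_REVISION and whose
 * shadow-VMCS indicator matches whether vmcs12 enables VMCS shadowing.
 */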
12567 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
12568                                           struct vmcs12 *vmcs12)
12569 {
12570         int r;
12571         struct page *page;
12572         struct vmcs12 *shadow;
12573
12574         if (vmcs12->vmcs_link_pointer == -1ull)
12575                 return 0;
12576
12577         if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
12578                 return -EINVAL;
12579
12580         page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
12581         if (is_error_page(page))
12582                 return -EINVAL;
12583
12584         r = 0;
12585         shadow = kmap(page);
12586         if (shadow->hdr.revision_id != VMCS12_REVISION ||
12587             shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
12588                 r = -EINVAL;
12589         kunmap(page);
12590         kvm_release_page_clean(page);
12591         return r;
12592 }
12593
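/*
 * Checks on vmcs12 guest-state fields (CR0/CR4, the VMCS link pointer, EFER
 * vs. the IA-32e mode entry control, BNDCFGS) whose failure is reported as a
 * failed VM-entry VM-exit, with the qualification returned in *exit_qual.
 */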
12594 static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
12595                                   u32 *exit_qual)
12596 {
12597         bool ia32e;
12598
12599         *exit_qual = ENTRY_FAIL_DEFAULT;
12600
12601         if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
12602             !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
12603                 return 1;
12604
12605         if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
12606                 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
12607                 return 1;
12608         }
12609
12610         /*
12611          * If the load IA32_EFER VM-entry control is 1, the following checks
12612          * are performed on the field for the IA32_EFER MSR:
12613          * - Bits reserved in the IA32_EFER MSR must be 0.
12614          * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
12615          *   the IA-32e mode guest VM-exit control. It must also be identical
12616          *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
12617          *   CR0.PG) is 1.
12618          */
12619         if (to_vmx(vcpu)->nested.nested_run_pending &&
12620             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
12621                 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
12622                 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
12623                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
12624                     ((vmcs12->guest_cr0 & X86_CR0_PG) &&
12625                      ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
12626                         return 1;
12627         }
12628
12629         if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
12630                 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
12631                 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
12632                         return 1;
12633
12634         return 0;
12635 }
12636
12637 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
12638                                    struct vmcs12 *vmcs12);
12639
12640 /*
12641  * If from_vmentry is false, this is being called from state restore (either RSM
12642  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
12643  */
12644 static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
12645                                           bool from_vmentry)
12646 {
12647         struct vcpu_vmx *vmx = to_vmx(vcpu);
12648         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
12649         bool evaluate_pending_interrupts;
12650         u32 exit_reason = EXIT_REASON_INVALID_STATE;
12651         u32 exit_qual;
12652
12653         evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
12654                 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
12655         if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
12656                 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
12657
12658         if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
12659                 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
12660         if (kvm_mpx_supported() &&
12661                 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
12662                 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
12663
12664         vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
12665
12666         prepare_vmcs02_early(vmx, vmcs12);
12667
12668         if (from_vmentry) {
12669                 nested_get_vmcs12_pages(vcpu);
12670
12671                 if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
12672                         goto vmentry_fail_vmexit;
12673         }
12674
12675         enter_guest_mode(vcpu);
12676         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
12677                 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
12678
12679         if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
12680                 goto vmentry_fail_vmexit_guest_mode;
12681
12682         if (from_vmentry) {
12683                 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
12684                 exit_qual = nested_vmx_load_msr(vcpu,
12685                                                 vmcs12->vm_entry_msr_load_addr,
12686                                                 vmcs12->vm_entry_msr_load_count);
12687                 if (exit_qual)
12688                         goto vmentry_fail_vmexit_guest_mode;
12689         } else {
12690                 /*
12691                  * The MMU is not initialized to point at the right entities yet and
12692                  * "get pages" would need to read data from the guest (i.e. we will
12693                  * need to perform gpa to hpa translation). Request a call
12694                  * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
12695                  * have already been set at vmentry time and should not be reset.
12696                  */
12697                 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
12698         }
12699
12700         /*
12701          * If L1 had a pending IRQ/NMI at the time it executed
12702          * VMLAUNCH/VMRESUME which wasn't delivered because it was
12703          * disallowed (e.g. interrupts disabled), L0 needs to
12704          * evaluate if this pending event should cause an exit from L2
12705          * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
12706          * intercept EXTERNAL_INTERRUPT).
12707          *
12708          * Usually this would be handled by the processor noticing an
12709          * IRQ/NMI window request, or checking RVI during evaluation of
12710          * pending virtual interrupts.  However, this setting was done
12711          * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
12712          * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
12713          */
12714         if (unlikely(evaluate_pending_interrupts))
12715                 kvm_make_request(KVM_REQ_EVENT, vcpu);
12716
12717         /*
12718          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
12719          * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
12720          * returned as far as L1 is concerned. It will only return (and set
12721          * the success flag) when L2 exits (see nested_vmx_vmexit()).
12722          */
12723         return 0;
12724
12725         /*
12726          * A failed consistency check that leads to a VMExit during L1's
12727          * VMEnter to L2 is a variation of a normal VMexit, as explained in
12728          * 26.7 "VM-entry failures during or after loading guest state".
12729          */
12730 vmentry_fail_vmexit_guest_mode:
12731         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
12732                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
12733         leave_guest_mode(vcpu);
12734
12735 vmentry_fail_vmexit:
12736         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
12737
12738         if (!from_vmentry)
12739                 return 1;
12740
12741         load_vmcs12_host_state(vcpu, vmcs12);
12742         vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
12743         vmcs12->exit_qualification = exit_qual;
12744         if (enable_shadow_vmcs)
12745                 vmx->nested.sync_shadow_vmcs = true;
12746         return 1;
12747 }
12748
12749 /*
12750  * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
12751  * for running an L2 nested guest.
12752  */
12753 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
12754 {
12755         struct vmcs12 *vmcs12;
12756         struct vcpu_vmx *vmx = to_vmx(vcpu);
12757         u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
12758         int ret;
12759
12760         if (!nested_vmx_check_permission(vcpu))
12761                 return 1;
12762
12763         if (vmx->nested.current_vmptr == -1ull)
12764                 return nested_vmx_failInvalid(vcpu);
12765
12766         vmcs12 = get_vmcs12(vcpu);
12767
12768         /*
12769          * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
12770          * that there *is* a valid VMCS pointer, RFLAGS.CF is set
12771          * rather than RFLAGS.ZF, and no error number is stored to the
12772          * VM-instruction error field.
12773          */
12774         if (vmcs12->hdr.shadow_vmcs)
12775                 return nested_vmx_failInvalid(vcpu);
12776
12777         if (enable_shadow_vmcs)
12778                 copy_shadow_to_vmcs12(vmx);
12779
12780         /*
12781          * The nested entry process starts with enforcing various prerequisites
12782          * on vmcs12 as required by the Intel SDM, acting appropriately when
12783          * they fail: as the SDM explains, some conditions should cause the
12784          * instruction to fail, while others will cause the instruction to seem
12785          * to succeed, but return an EXIT_REASON_INVALID_STATE.
12786          * To speed up the normal (success) code path, we should avoid checking
12787          * for misconfigurations which will anyway be caught by the processor
12788          * when using the merged vmcs02.
12789          */
12790         if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
12791                 return nested_vmx_failValid(vcpu,
12792                         VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
12793
12794         if (vmcs12->launch_state == launch)
12795                 return nested_vmx_failValid(vcpu,
12796                         launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
12797                                : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
12798
12799         ret = check_vmentry_prereqs(vcpu, vmcs12);
12800         if (ret)
12801                 return nested_vmx_failValid(vcpu, ret);
12802
12803         /*
12804          * We're finally done with prerequisite checking, and can start with
12805          * the nested entry.
12806          */
12807
12808         vmx->nested.nested_run_pending = 1;
12809         ret = nested_vmx_enter_non_root_mode(vcpu, true);
12810         if (ret) {
12811                 vmx->nested.nested_run_pending = 0;
12812                 return 1;
12813         }
12814
12815         /* Hide L1D cache contents from the nested guest.  */
12816         vmx->vcpu.arch.l1tf_flush_l1d = true;
12817
12818         /*
12819          * Must happen outside of nested_vmx_enter_non_root_mode() as it will
12820          * also be used as part of restoring nVMX state for
12821          * snapshot restore (migration).
12822          *
12823          * In this flow, it is assumed that the vmcs12 cache was
12824          * transferred as part of the captured nVMX state and should
12825          * therefore not be read from guest memory (which may not
12826          * exist on the destination host yet).
12827          */
12828         nested_cache_shadow_vmcs12(vcpu, vmcs12);
12829
12830         /*
12831          * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
12832          * by event injection, halt the vcpu.
12833          */
12834         if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
12835             !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
12836                 vmx->nested.nested_run_pending = 0;
12837                 return kvm_vcpu_halt(vcpu);
12838         }
12839         return 1;
12840 }
12841
12842 /*
12843  * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
12844  * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
12845  * This function returns the new value we should put in vmcs12.guest_cr0.
12846  * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
12847  *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
12848  *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
12849  *     didn't trap the bit, because if L1 did, so would L0).
12850  *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
12851  *     been modified by L2, and L1 knows it. So just leave the old value of
12852  *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
12853  *     isn't relevant, because if L0 traps this bit it can set it to anything.
12854  *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
12855  *     changed these bits, and therefore they need to be updated, but L0
12856  *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
12857  *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
12858  */
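/*
 * Illustrative example (the specific bit assignments are an assumption for
 * this sketch, not taken from the code): if L0 lets the guest own only CR0.TS
 * (cr0_guest_owned_bits == X86_CR0_TS) and L1 traps only CR0.PE
 * (cr0_guest_host_mask has just PE set), then TS is taken from vmcs02
 * GUEST_CR0 (case 1), PE is kept from vmcs12->guest_cr0 (case 2), and a bit
 * such as CR0.MP, trapped by L0 but not by L1, is taken from vmcs02
 * CR0_READ_SHADOW (case 3).
 */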
12859 static inline unsigned long
12860 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12861 {
12862         return
12863         /*1*/   (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
12864         /*2*/   (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
12865         /*3*/   (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
12866                         vcpu->arch.cr0_guest_owned_bits));
12867 }
12868
12869 static inline unsigned long
12870 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12871 {
12872         return
12873         /*1*/   (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
12874         /*2*/   (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
12875         /*3*/   (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
12876                         vcpu->arch.cr4_guest_owned_bits));
12877 }
12878
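/*
 * Record an exception, NMI or interrupt that was injected into L2 but not yet
 * delivered when the VM-exit occurred into vmcs12's IDT-vectoring information
 * (plus error code / instruction length) so that L1 can re-inject it.
 */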
12879 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
12880                                        struct vmcs12 *vmcs12)
12881 {
12882         u32 idt_vectoring;
12883         unsigned int nr;
12884
12885         if (vcpu->arch.exception.injected) {
12886                 nr = vcpu->arch.exception.nr;
12887                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
12888
12889                 if (kvm_exception_is_soft(nr)) {
12890                         vmcs12->vm_exit_instruction_len =
12891                                 vcpu->arch.event_exit_inst_len;
12892                         idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
12893                 } else
12894                         idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
12895
12896                 if (vcpu->arch.exception.has_error_code) {
12897                         idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
12898                         vmcs12->idt_vectoring_error_code =
12899                                 vcpu->arch.exception.error_code;
12900                 }
12901
12902                 vmcs12->idt_vectoring_info_field = idt_vectoring;
12903         } else if (vcpu->arch.nmi_injected) {
12904                 vmcs12->idt_vectoring_info_field =
12905                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
12906         } else if (vcpu->arch.interrupt.injected) {
12907                 nr = vcpu->arch.interrupt.nr;
12908                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
12909
12910                 if (vcpu->arch.interrupt.soft) {
12911                         idt_vectoring |= INTR_TYPE_SOFT_INTR;
12912                         vmcs12->vm_entry_instruction_len =
12913                                 vcpu->arch.event_exit_inst_len;
12914                 } else
12915                         idt_vectoring |= INTR_TYPE_EXT_INTR;
12916
12917                 vmcs12->idt_vectoring_info_field = idt_vectoring;
12918         }
12919 }
12920
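/*
 * While L2 is active, check in priority order for pending events that should
 * cause a nested VM-exit to L1: intercepted exceptions, an expired VMX
 * preemption timer, NMIs, and external interrupts that L1 wants to intercept.
 * Returns -EBUSY when the exit must be deferred, i.e. while a nested VM-entry
 * is still pending or an event needs to be re-injected first.
 */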
12921 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
12922 {
12923         struct vcpu_vmx *vmx = to_vmx(vcpu);
12924         unsigned long exit_qual;
12925         bool block_nested_events =
12926             vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
12927
12928         if (vcpu->arch.exception.pending &&
12929                 nested_vmx_check_exception(vcpu, &exit_qual)) {
12930                 if (block_nested_events)
12931                         return -EBUSY;
12932                 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
12933                 return 0;
12934         }
12935
12936         if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
12937             vmx->nested.preemption_timer_expired) {
12938                 if (block_nested_events)
12939                         return -EBUSY;
12940                 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
12941                 return 0;
12942         }
12943
12944         if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
12945                 if (block_nested_events)
12946                         return -EBUSY;
12947                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
12948                                   NMI_VECTOR | INTR_TYPE_NMI_INTR |
12949                                   INTR_INFO_VALID_MASK, 0);
12950                 /*
12951                  * The NMI-triggered VM exit counts as injection:
12952                  * clear this one and block further NMIs.
12953                  */
12954                 vcpu->arch.nmi_pending = 0;
12955                 vmx_set_nmi_mask(vcpu, true);
12956                 return 0;
12957         }
12958
12959         if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
12960             nested_exit_on_intr(vcpu)) {
12961                 if (block_nested_events)
12962                         return -EBUSY;
12963                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
12964                 return 0;
12965         }
12966
12967         vmx_complete_nested_posted_interrupt(vcpu);
12968         return 0;
12969 }
12970
12971 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
12972 {
12973         to_vmx(vcpu)->req_immediate_exit = true;
12974 }
12975
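/*
 * Convert the time remaining on the emulated preemption-timer hrtimer back
 * into VMX-preemption-timer units: ns remaining * virtual_tsc_khz / 10^6
 * gives guest TSC cycles, which are then scaled down by the emulated timer
 * rate (VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE).
 */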
12976 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
12977 {
12978         ktime_t remaining =
12979                 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
12980         u64 value;
12981
12982         if (ktime_to_ns(remaining) <= 0)
12983                 return 0;
12984
12985         value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
12986         do_div(value, 1000000);
12987         return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
12988 }
12989
12990 /*
12991  * Update the guest state fields of vmcs12 to reflect changes that
12992  * occurred while L2 was running. (The "IA-32e mode guest" bit of the
12993  * VM-entry controls is also updated, since this is really a guest
12994  * state bit.)
12995  */
12996 static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12997 {
12998         vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
12999         vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
13000
13001         vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
13002         vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
13003         vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
13004
13005         vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
13006         vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
13007         vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
13008         vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
13009         vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
13010         vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
13011         vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
13012         vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
13013         vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
13014         vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
13015         vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
13016         vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
13017         vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
13018         vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
13019         vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
13020         vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
13021         vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
13022         vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
13023         vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
13024         vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
13025         vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
13026         vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
13027         vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
13028         vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
13029         vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
13030         vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
13031         vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
13032         vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
13033         vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
13034         vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
13035         vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
13036         vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
13037         vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
13038         vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
13039         vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
13040         vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
13041
13042         vmcs12->guest_interruptibility_info =
13043                 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
13044         vmcs12->guest_pending_dbg_exceptions =
13045                 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
13046         if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
13047                 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
13048         else
13049                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
13050
13051         if (nested_cpu_has_preemption_timer(vmcs12)) {
13052                 if (vmcs12->vm_exit_controls &
13053                     VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
13054                         vmcs12->vmx_preemption_timer_value =
13055                                 vmx_get_preemption_timer_value(vcpu);
13056                 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
13057         }
13058
13059         /*
13060          * In some cases (usually, nested EPT), L2 is allowed to change its
13061          * own CR3 without exiting. If it has changed it, we must keep it.
13062          * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
13063          * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
13064          *
13065          * Additionally, restore L2's PDPTR to vmcs12.
13066          */
13067         if (enable_ept) {
13068                 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
13069                 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
13070                 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
13071                 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
13072                 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
13073         }
13074
13075         vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
13076
13077         if (nested_cpu_has_vid(vmcs12))
13078                 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
13079
13080         vmcs12->vm_entry_controls =
13081                 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
13082                 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
13083
13084         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
13085                 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
13086                 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
13087         }
13088
13089         /* TODO: These cannot have changed unless we have MSR bitmaps and
13090          * the relevant bit asks not to trap the change */
13091         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
13092                 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
13093         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
13094                 vmcs12->guest_ia32_efer = vcpu->arch.efer;
13095         vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
13096         vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
13097         vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
13098         if (kvm_mpx_supported())
13099                 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
13100 }
13101
13102 /*
13103  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
13104  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
13105  * and this function updates it to reflect the changes to the guest state while
13106  * L2 was running (and perhaps made some exits which were handled directly by L0
13107  * without going back to L1), and to reflect the exit reason.
13108  * Note that we do not have to copy all VMCS fields here, just those that
13109  * could have been changed by the L2 guest or the exit - i.e., the guest-state and
13110  * exit-information fields only. Other fields are modified by L1 with VMWRITE,
13111  * which already writes to vmcs12 directly.
13112  */
13113 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
13114                            u32 exit_reason, u32 exit_intr_info,
13115                            unsigned long exit_qualification)
13116 {
13117         /* update guest state fields: */
13118         sync_vmcs12(vcpu, vmcs12);
13119
13120         /* update exit information fields: */
13121
13122         vmcs12->vm_exit_reason = exit_reason;
13123         vmcs12->exit_qualification = exit_qualification;
13124         vmcs12->vm_exit_intr_info = exit_intr_info;
13125
13126         vmcs12->idt_vectoring_info_field = 0;
13127         vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
13128         vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
13129
13130         if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
13131                 vmcs12->launch_state = 1;
13132
13133                 /* vm_entry_intr_info_field is cleared on exit. Emulate this
13134                  * instead of reading the real value. */
13135                 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
13136
13137                 /*
13138                  * Transfer the event that L0 or L1 may have wanted to inject into
13139                  * L2 to IDT_VECTORING_INFO_FIELD.
13140                  */
13141                 vmcs12_save_pending_event(vcpu, vmcs12);
13142         }
13143
13144         /*
13145          * Drop what we picked up for L2 via vmx_complete_interrupts. It is
13146          * preserved above and would only end up incorrectly in L1.
13147          */
13148         vcpu->arch.nmi_injected = false;
13149         kvm_clear_exception_queue(vcpu);
13150         kvm_clear_interrupt_queue(vcpu);
13151 }
13152
13153 /*
13154  * A part of what we need to do when the nested L2 guest exits and we want to
13155  * run its L1 parent is to reset L1's guest state to the host state specified
13156  * in vmcs12.
13157  * This function is to be called not only on normal nested exit, but also on
13158  * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
13159  * Failures During or After Loading Guest State").
13160  * This function should be called when the active VMCS is L1's (vmcs01).
13161  */
13162 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
13163                                    struct vmcs12 *vmcs12)
13164 {
13165         struct kvm_segment seg;
13166         u32 entry_failure_code;
13167
13168         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
13169                 vcpu->arch.efer = vmcs12->host_ia32_efer;
13170         else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
13171                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
13172         else
13173                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
13174         vmx_set_efer(vcpu, vcpu->arch.efer);
13175
13176         kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
13177         kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
13178         vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
13179         vmx_set_interrupt_shadow(vcpu, 0);
13180
13181         /*
13182          * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
13183          * actually changed, because vmx_set_cr0 refers to efer set above.
13184          *
13185          * CR0_GUEST_HOST_MASK is already set in the original vmcs01
13186          * (KVM doesn't change it);
13187          */
13188         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
13189         vmx_set_cr0(vcpu, vmcs12->host_cr0);
13190
13191         /* Same as above - no reason to call set_cr4_guest_host_mask().  */
13192         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
13193         vmx_set_cr4(vcpu, vmcs12->host_cr4);
13194
13195         nested_ept_uninit_mmu_context(vcpu);
13196
13197         /*
13198          * Only PDPTE load can fail as the value of cr3 was checked on entry and
13199          * couldn't have changed.
13200          */
13201         if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
13202                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
13203
13204         if (!enable_ept)
13205                 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
13206
13207         /*
13208          * If vmcs01 doesn't use VPID, the CPU flushes the TLB on every
13209          * VMEntry/VMExit, so there is no need to flush it here.
13210          *
13211          * If vmcs12 doesn't use VPID, L1 expects the TLB to be
13212          * flushed on every VMEntry/VMExit.
13213          *
13214          * Otherwise, we can preserve TLB entries as long as we are
13215          * able to tag L1 TLB entries differently than L2 TLB entries.
13216          *
13217          * If vmcs12 uses EPT, we need to execute this flush on EPTP01
13218          * and therefore we request the TLB flush to happen only after VMCS EPTP
13219          * has been set by KVM_REQ_LOAD_CR3.
13220          */
13221         if (enable_vpid &&
13222             (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
13223                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
13224         }
13225
13226         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
13227         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
13228         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
13229         vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
13230         vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
13231         vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
13232         vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
13233
13234         /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
13235         if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
13236                 vmcs_write64(GUEST_BNDCFGS, 0);
13237
13238         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
13239                 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
13240                 vcpu->arch.pat = vmcs12->host_ia32_pat;
13241         }
13242         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
13243                 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
13244                         vmcs12->host_ia32_perf_global_ctrl);
13245
13246         /* Set L1 segment info according to Intel SDM
13247             27.5.2 Loading Host Segment and Descriptor-Table Registers */
13248         seg = (struct kvm_segment) {
13249                 .base = 0,
13250                 .limit = 0xFFFFFFFF,
13251                 .selector = vmcs12->host_cs_selector,
13252                 .type = 11,
13253                 .present = 1,
13254                 .s = 1,
13255                 .g = 1
13256         };
13257         if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
13258                 seg.l = 1;
13259         else
13260                 seg.db = 1;
13261         vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
13262         seg = (struct kvm_segment) {
13263                 .base = 0,
13264                 .limit = 0xFFFFFFFF,
13265                 .type = 3,
13266                 .present = 1,
13267                 .s = 1,
13268                 .db = 1,
13269                 .g = 1
13270         };
13271         seg.selector = vmcs12->host_ds_selector;
13272         vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
13273         seg.selector = vmcs12->host_es_selector;
13274         vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
13275         seg.selector = vmcs12->host_ss_selector;
13276         vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
13277         seg.selector = vmcs12->host_fs_selector;
13278         seg.base = vmcs12->host_fs_base;
13279         vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
13280         seg.selector = vmcs12->host_gs_selector;
13281         seg.base = vmcs12->host_gs_base;
13282         vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
13283         seg = (struct kvm_segment) {
13284                 .base = vmcs12->host_tr_base,
13285                 .limit = 0x67,
13286                 .selector = vmcs12->host_tr_selector,
13287                 .type = 11,
13288                 .present = 1
13289         };
13290         vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
13291
13292         kvm_set_dr(vcpu, 7, 0x400);
13293         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
13294
13295         if (cpu_has_vmx_msr_bitmap())
13296                 vmx_update_msr_bitmap(vcpu);
13297
13298         if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
13299                                 vmcs12->vm_exit_msr_load_count))
13300                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
13301 }
13302
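/*
 * Figure out the EFER value that vmcs01 establishes for L1: take it from
 * GUEST_IA32_EFER if vmcs01 uses the VM-entry EFER load control; if the
 * hardware control exists but is unused, the guest and host EFER match, so
 * return host_efer; otherwise search the VM-entry MSR autoload list and the
 * shared-MSR array, falling back to host_efer.
 */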
13303 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
13304 {
13305         struct shared_msr_entry *efer_msr;
13306         unsigned int i;
13307
13308         if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
13309                 return vmcs_read64(GUEST_IA32_EFER);
13310
13311         if (cpu_has_load_ia32_efer)
13312                 return host_efer;
13313
13314         for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
13315                 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
13316                         return vmx->msr_autoload.guest.val[i].value;
13317         }
13318
13319         efer_msr = find_msr_entry(vmx, MSR_EFER);
13320         if (efer_msr)
13321                 return efer_msr->data;
13322
13323         return host_efer;
13324 }
13325
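/*
 * Undo the state loaded by a nested VM-entry that failed a hardware
 * consistency check (VMFail): restore L1's register state from vmcs01 and,
 * for MSRs that were already loaded from the VM-entry load list, reload the
 * matching values from the VM-exit load list.
 */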
13326 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
13327 {
13328         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13329         struct vcpu_vmx *vmx = to_vmx(vcpu);
13330         struct vmx_msr_entry g, h;
13331         struct msr_data msr;
13332         gpa_t gpa;
13333         u32 i, j;
13334
13335         vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
13336
13337         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
13338                 /*
13339                  * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
13340                  * as vmcs01.GUEST_DR7 contains a userspace defined value
13341                  * and vcpu->arch.dr7 is not squirreled away before the
13342                  * nested VMENTER (not worth adding a variable in nested_vmx).
13343                  */
13344                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
13345                         kvm_set_dr(vcpu, 7, DR7_FIXED_1);
13346                 else
13347                         WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
13348         }
13349
13350         /*
13351          * Note that calling vmx_set_{efer,cr0,cr4} is important as they
13352          * handle a variety of side effects to KVM's software model.
13353          */
13354         vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
13355
13356         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
13357         vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
13358
13359         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
13360         vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
13361
13362         nested_ept_uninit_mmu_context(vcpu);
13363         vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
13364         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
13365
13366         /*
13367          * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
13368          * from vmcs01 (if necessary).  The PDPTRs are not loaded on
13369          * VMFail; like everything else, we just need to ensure our
13370          * software model is up-to-date.
13371          */
13372         ept_save_pdptrs(vcpu);
13373
13374         kvm_mmu_reset_context(vcpu);
13375
13376         if (cpu_has_vmx_msr_bitmap())
13377                 vmx_update_msr_bitmap(vcpu);
13378
13379         /*
13380          * This nasty bit of open coding is a compromise between blindly
13381          * loading L1's MSRs using the exit load lists (incorrect emulation
13382          * of VMFail), leaving the nested VM's MSRs in the software model
13383          * (incorrect behavior) and snapshotting the modified MSRs (too
13384          * expensive since the lists are unbound by hardware).  For each
13385          * MSR that was (prematurely) loaded from the nested VMEntry load
13386          * list, reload it from the exit load list if it exists and differs
13387          * from the guest value.  The intent is to stuff host state as
13388          * silently as possible, not to fully process the exit load list.
13389          */
13390         msr.host_initiated = false;
13391         for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
13392                 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
13393                 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
13394                         pr_debug_ratelimited(
13395                                 "%s read MSR index failed (%u, 0x%08llx)\n",
13396                                 __func__, i, gpa);
13397                         goto vmabort;
13398                 }
13399
13400                 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
13401                         gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
13402                         if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
13403                                 pr_debug_ratelimited(
13404                                         "%s read MSR failed (%u, 0x%08llx)\n",
13405                                         __func__, j, gpa);
13406                                 goto vmabort;
13407                         }
13408                         if (h.index != g.index)
13409                                 continue;
13410                         if (h.value == g.value)
13411                                 break;
13412
13413                         if (nested_vmx_load_msr_check(vcpu, &h)) {
13414                                 pr_debug_ratelimited(
13415                                         "%s check failed (%u, 0x%x, 0x%x)\n",
13416                                         __func__, j, h.index, h.reserved);
13417                                 goto vmabort;
13418                         }
13419
13420                         msr.index = h.index;
13421                         msr.data = h.value;
13422                         if (kvm_set_msr(vcpu, &msr)) {
13423                                 pr_debug_ratelimited(
13424                                         "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
13425                                         __func__, j, h.index, h.value);
13426                                 goto vmabort;
13427                         }
13428                 }
13429         }
13430
13431         return;
13432
13433 vmabort:
13434         nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
13435 }
13436
13437 /*
13438  * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
13439  * and modify vmcs12 to make it see what it would expect to see there if
13440  * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
13441  */
13442 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
13443                               u32 exit_intr_info,
13444                               unsigned long exit_qualification)
13445 {
13446         struct vcpu_vmx *vmx = to_vmx(vcpu);
13447         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13448
13449         /* trying to cancel vmlaunch/vmresume is a bug */
13450         WARN_ON_ONCE(vmx->nested.nested_run_pending);
13451
13452         /*
13453          * The only expected VM-instruction error is "VM entry with
13454          * invalid control field(s)." Anything else indicates a
13455          * problem with L0.
13456          */
13457         WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) !=
13458                                    VMXERR_ENTRY_INVALID_CONTROL_FIELD));
13459
13460         leave_guest_mode(vcpu);
13461
13462         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
13463                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
13464
13465         if (likely(!vmx->fail)) {
13466                 if (exit_reason == -1)
13467                         sync_vmcs12(vcpu, vmcs12);
13468                 else
13469                         prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
13470                                        exit_qualification);
13471
13472                 /*
13473                  * Must happen outside of sync_vmcs12() as it will
13474                  * also be used to capture vmcs12 cache as part of
13475                  * capturing nVMX state for snapshot (migration).
13476                  *
13477                  * Otherwise, this flush will dirty guest memory at a
13478                  * point it is already assumed by user-space to be
13479                  * immutable.
13480                  */
13481                 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
13482
13483                 if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
13484                                          vmcs12->vm_exit_msr_store_count))
13485                         nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
13486         }
13487
13488         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
13489
13490         /* Update any VMCS fields that might have changed while L2 ran */
13491         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
13492         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
13493         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
13494
13495         if (kvm_has_tsc_control)
13496                 decache_tsc_multiplier(vmx);
13497
13498         if (vmx->nested.change_vmcs01_virtual_apic_mode) {
13499                 vmx->nested.change_vmcs01_virtual_apic_mode = false;
13500                 vmx_set_virtual_apic_mode(vcpu);
13501         } else if (!nested_cpu_has_ept(vmcs12) &&
13502                    nested_cpu_has2(vmcs12,
13503                                    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
13504                 vmx_flush_tlb(vcpu, true);
13505         }
13506
13507         /* This is needed for same reason as it was needed in prepare_vmcs02 */
13508         vmx->host_rsp = 0;
13509
13510         /* Unpin physical memory we referred to in vmcs02 */
13511         if (vmx->nested.apic_access_page) {
13512                 kvm_release_page_dirty(vmx->nested.apic_access_page);
13513                 vmx->nested.apic_access_page = NULL;
13514         }
13515         if (vmx->nested.virtual_apic_page) {
13516                 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
13517                 vmx->nested.virtual_apic_page = NULL;
13518         }
13519         if (vmx->nested.pi_desc_page) {
13520                 kunmap(vmx->nested.pi_desc_page);
13521                 kvm_release_page_dirty(vmx->nested.pi_desc_page);
13522                 vmx->nested.pi_desc_page = NULL;
13523                 vmx->nested.pi_desc = NULL;
13524         }
13525
13526         /*
13527          * While L2 was running, the mmu_notifier may have forced a reload of the
13528          * APIC-access page's hpa in the L2 vmcs; reload it for L1 before entering L1.
13529          */
13530         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
13531
13532         if (enable_shadow_vmcs && exit_reason != -1)
13533                 vmx->nested.sync_shadow_vmcs = true;
13534
13535         /* in case we halted in L2 */
13536         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
13537
13538         if (likely(!vmx->fail)) {
13539                 /*
13540                  * TODO: SDM says that with acknowledge interrupt on
13541                  * exit, bit 31 of the VM-exit interrupt information
13542                  * (valid interrupt) is always set to 1 on
13543                  * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
13544                  * need kvm_cpu_has_interrupt().  See the commit
13545                  * message for details.
13546                  */
13547                 if (nested_exit_intr_ack_set(vcpu) &&
13548                     exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
13549                     kvm_cpu_has_interrupt(vcpu)) {
13550                         int irq = kvm_cpu_get_interrupt(vcpu);
13551                         WARN_ON(irq < 0);
13552                         vmcs12->vm_exit_intr_info = irq |
13553                                 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
13554                 }
13555
13556                 if (exit_reason != -1)
13557                         trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
13558                                                        vmcs12->exit_qualification,
13559                                                        vmcs12->idt_vectoring_info_field,
13560                                                        vmcs12->vm_exit_intr_info,
13561                                                        vmcs12->vm_exit_intr_error_code,
13562                                                        KVM_ISA_VMX);
13563
13564                 load_vmcs12_host_state(vcpu, vmcs12);
13565
13566                 return;
13567         }
13568
13569         /*
13570          * After an early L2 VM-entry failure, we're now back
13571          * in L1 which thinks it just finished a VMLAUNCH or
13572          * VMRESUME instruction, so we need to set the failure
13573          * flag and the VM-instruction error field of the VMCS
13574          * accordingly, and skip the emulated instruction.
13575          */
13576         (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
13577
13578         /*
13579          * Restore L1's host state to KVM's software model.  We're here
13580          * because a consistency check was caught by hardware, which
13581          * means some amount of guest state has been propagated to KVM's
13582          * model and needs to be unwound to the host's state.
13583          */
13584         nested_vmx_restore_host_state(vcpu);
13585
13586         vmx->fail = 0;
13587 }
13588
13589 /*
13590  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
13591  */
13592 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
13593 {
13594         if (is_guest_mode(vcpu)) {
13595                 to_vmx(vcpu)->nested.nested_run_pending = 0;
13596                 nested_vmx_vmexit(vcpu, -1, 0, 0);
13597         }
13598         free_nested(to_vmx(vcpu));
13599 }
13600
13601 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
13602                                struct x86_instruction_info *info,
13603                                enum x86_intercept_stage stage)
13604 {
13605         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
13606         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
13607
13608         /*
13609          * RDPID causes #UD if disabled through secondary execution controls.
13610          * Because it is marked as EmulateOnUD, we need to intercept it here.
13611          */
13612         if (info->intercept == x86_intercept_rdtscp &&
13613             !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
13614                 ctxt->exception.vector = UD_VECTOR;
13615                 ctxt->exception.error_code_valid = false;
13616                 return X86EMUL_PROPAGATE_FAULT;
13617         }
13618
13619         /* TODO: check more intercepts... */
13620         return X86EMUL_CONTINUE;
13621 }
13622
13623 #ifdef CONFIG_X86_64
13624 /* Compute (a << shift) / divisor; return 1 on overflow, 0 otherwise. */
13625 static inline int u64_shl_div_u64(u64 a, unsigned int shift,
13626                                   u64 divisor, u64 *result)
13627 {
13628         u64 low = a << shift, high = a >> (64 - shift);
13629
13630         /* Avoid overflowing divq below: the quotient must fit in 64 bits. */
13631         if (high >= divisor)
13632                 return 1;
13633
13634         /* low holds the quotient; high holds the remainder, which is discarded */
13635         asm("divq %2\n\t" : "=a" (low), "=d" (high) :
13636             "rm" (divisor), "0" (low), "1" (high));
13637         *result = low;
13638
13639         return 0;
13640 }
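/*
 * Reference sketch (illustrative only, not built as part of this file):
 * on x86-64 the same computation can be expressed with the compiler's
 * unsigned __int128 type.  The helper above fails in exactly the cases
 * where the 128-bit quotient would not fit in 64 bits, which is when
 * divq would fault.  u64_shl_div_u64_ref is a made-up name used purely
 * for comparison:
 *
 *	static inline int u64_shl_div_u64_ref(u64 a, unsigned int shift,
 *					      u64 divisor, u64 *result)
 *	{
 *		unsigned __int128 n = (unsigned __int128)a << shift;
 *
 *		if (n / divisor > (u64)~0ull)
 *			return 1;
 *		*result = n / divisor;
 *		return 0;
 *	}
 */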
13641
13642 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
13643 {
13644         struct vcpu_vmx *vmx;
13645         u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
13646
13647         if (kvm_mwait_in_guest(vcpu->kvm))
13648                 return -EOPNOTSUPP;
13649
13650         vmx = to_vmx(vcpu);
13651         tscl = rdtsc();
13652         guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
13653         delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
13654         lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
13655
13656         if (delta_tsc > lapic_timer_advance_cycles)
13657                 delta_tsc -= lapic_timer_advance_cycles;
13658         else
13659                 delta_tsc = 0;
13660
13661         /* Convert to host delta tsc if tsc scaling is enabled */
13662         if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
13663                         u64_shl_div_u64(delta_tsc,
13664                                 kvm_tsc_scaling_ratio_frac_bits,
13665                                 vcpu->arch.tsc_scaling_ratio,
13666                                 &delta_tsc))
13667                 return -ERANGE;
13668
13669         /*
13670          * If the delta TSC doesn't fit in 32 bits after the preemption
13671          * timer shift, the preemption timer can't be used.  It might fit
13672          * on later VM-entries, but checking on every VM-entry is costly,
13673          * so just fall back to an hrtimer.
13674          */
13675         if (delta_tsc >> (cpu_preemption_timer_multi + 32))
13676                 return -ERANGE;
13677
13678         vmx->hv_deadline_tsc = tscl + delta_tsc;
13679         return delta_tsc == 0;
13680 }
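/*
 * Worked example (illustrative numbers): with a 3 GHz guest TSC, a
 * deadline 10 ms in the future gives a delta_tsc of about 30,000,000
 * cycles.  The VMX-preemption timer counts down at the host TSC rate
 * divided by 2^cpu_preemption_timer_multi (reported in
 * MSR_IA32_VMX_MISC), so with a multiplier of 5 the value programmed
 * at VM-entry is roughly 30,000,000 >> 5, i.e. ~937,500, which easily
 * fits the timer's 32-bit field.  The delta_tsc >> (multi + 32) check
 * above rejects deadlines so far out that the shifted value would not
 * fit in 32 bits.
 */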
13681
13682 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
13683 {
13684         to_vmx(vcpu)->hv_deadline_tsc = -1;
13685 }
13686 #endif
13687
13688 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
13689 {
13690         if (!kvm_pause_in_guest(vcpu->kvm))
13691                 shrink_ple_window(vcpu);
13692 }
13693
13694 static void vmx_slot_enable_log_dirty(struct kvm *kvm,
13695                                      struct kvm_memory_slot *slot)
13696 {
13697         kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
13698         kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
13699 }
13700
13701 static void vmx_slot_disable_log_dirty(struct kvm *kvm,
13702                                        struct kvm_memory_slot *slot)
13703 {
13704         kvm_mmu_slot_set_dirty(kvm, slot);
13705 }
13706
13707 static void vmx_flush_log_dirty(struct kvm *kvm)
13708 {
13709         kvm_flush_pml_buffers(kvm);
13710 }
13711
13712 static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
13713 {
13714         struct vmcs12 *vmcs12;
13715         struct vcpu_vmx *vmx = to_vmx(vcpu);
13716         gpa_t gpa;
13717         struct page *page = NULL;
13718         u64 *pml_address;
13719
13720         if (is_guest_mode(vcpu)) {
13721                 WARN_ON_ONCE(vmx->nested.pml_full);
13722
13723                 /*
13724                  * Check if PML is enabled for the nested guest.
13725                  * Whether EPTP bit 6 (enable accessed/dirty flags)
13726                  * is set is already checked as part of A/D emulation.
13727                  */
13728                 vmcs12 = get_vmcs12(vcpu);
13729                 if (!nested_cpu_has_pml(vmcs12))
13730                         return 0;
13731
13732                 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
13733                         vmx->nested.pml_full = true;
13734                         return 1;
13735                 }
13736
13737                 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
13738
13739                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
13740                 if (is_error_page(page))
13741                         return 0;
13742
13743                 pml_address = kmap(page);
13744                 pml_address[vmcs12->guest_pml_index--] = gpa;
13745                 kunmap(page);
13746                 kvm_release_page_clean(page);
13747         }
13748
13749         return 0;
13750 }
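/*
 * Layout sketch (illustrative, from L1's point of view):
 * vmcs12->pml_address names a 4 KiB guest page holding PML_ENTITY_NUM
 * (512) u64 entries, and guest_pml_index counts down from 511 as
 * entries are logged.  Each entry is a 4 KiB-aligned guest-physical
 * address, matching the "& ~0xFFFull" masking above:
 *
 *	u64 *pml = map_of_l1_pml_page;           512 entries
 *	u64 gpa  = pml[index];                   bits 11:0 are zero
 *	l1_mark_gfn_dirty(gpa >> PAGE_SHIFT);    hypothetical L1 helper
 */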
13751
13752 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
13753                                            struct kvm_memory_slot *memslot,
13754                                            gfn_t offset, unsigned long mask)
13755 {
13756         kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
13757 }
13758
13759 static void __pi_post_block(struct kvm_vcpu *vcpu)
13760 {
13761         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
13762         struct pi_desc old, new;
13763         unsigned int dest;
13764
13765         do {
13766                 old.control = new.control = pi_desc->control;
13767                 WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
13768                      "Wakeup handler not enabled while the VCPU is blocked\n");
13769
13770                 dest = cpu_physical_id(vcpu->cpu);
13771
13772                 if (x2apic_enabled())
13773                         new.ndst = dest;
13774                 else
13775                         new.ndst = (dest << 8) & 0xFF00;
13776
13777                 /* set 'NV' to 'notification vector' */
13778                 new.nv = POSTED_INTR_VECTOR;
13779         } while (cmpxchg64(&pi_desc->control, old.control,
13780                            new.control) != old.control);
13781
13782         if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
13783                 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13784                 list_del(&vcpu->blocked_vcpu_list);
13785                 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13786                 vcpu->pre_pcpu = -1;
13787         }
13788 }
13789
13790 /*
13791  * This routine prepares a vCPU that is about to block while VT-d
13792  * posted interrupts are enabled:
13793  * - Add the vCPU to the per-CPU wakeup list, so that when an
13794  *   interrupt arrives the wakeup handler can find the right vCPU.
13795  * - Update the posted-interrupt descriptor:
13796  *      'NDST' <-- APIC ID of vcpu->pre_pcpu
13797  *      'NV'   <-- POSTED_INTR_WAKEUP_VECTOR
13798  * - If 'ON' becomes set during this process, at least one interrupt
13799  *   has already been posted for this vCPU, so it must not block;
13800  *   return 1 in that case, otherwise return 0.
13801  *
13802  */
13803 static int pi_pre_block(struct kvm_vcpu *vcpu)
13804 {
13805         unsigned int dest;
13806         struct pi_desc old, new;
13807         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
13808
13809         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
13810                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
13811                 !kvm_vcpu_apicv_active(vcpu))
13812                 return 0;
13813
13814         WARN_ON(irqs_disabled());
13815         local_irq_disable();
13816         if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
13817                 vcpu->pre_pcpu = vcpu->cpu;
13818                 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13819                 list_add_tail(&vcpu->blocked_vcpu_list,
13820                               &per_cpu(blocked_vcpu_on_cpu,
13821                                        vcpu->pre_pcpu));
13822                 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
13823         }
13824
13825         do {
13826                 old.control = new.control = pi_desc->control;
13827
13828                 WARN(pi_desc->sn == 1,
13829                      "Posted-interrupt descriptor SN bit set before blocking\n");
13831
13832                 /*
13833                  * Since the vCPU can be preempted during this process,
13834                  * vcpu->cpu may differ from pre_pcpu.  Use pre_pcpu as
13835                  * the destination of the wakeup notification so that,
13836                  * if an interrupt arrives while the vCPU is blocked,
13837                  * the wakeup handler runs on the CPU whose list holds
13838                  * this vCPU and can find it there.
13839                  */
13840                 dest = cpu_physical_id(vcpu->pre_pcpu);
13841
13842                 if (x2apic_enabled())
13843                         new.ndst = dest;
13844                 else
13845                         new.ndst = (dest << 8) & 0xFF00;
13846
13847                 /* set 'NV' to 'wakeup vector' */
13848                 new.nv = POSTED_INTR_WAKEUP_VECTOR;
13849         } while (cmpxchg64(&pi_desc->control, old.control,
13850                            new.control) != old.control);
13851
13852         /* We should not block the vCPU if an interrupt is posted for it.  */
13853         if (pi_test_on(pi_desc) == 1)
13854                 __pi_post_block(vcpu);
13855
13856         local_irq_enable();
13857         return (vcpu->pre_pcpu == -1);
13858 }
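/*
 * Both __pi_post_block() and pi_pre_block() update the 64-bit control
 * word of the posted-interrupt descriptor with the same lock-free
 * read-modify-write loop, so that bits posted concurrently by the
 * IOMMU (notably 'ON') are never lost.  A common helper, shown here
 * only as a hypothetical sketch and not part of this file, would look
 * like:
 *
 *	static void pi_set_ndst_nv(struct pi_desc *pi_desc, u32 ndst, u8 nv)
 *	{
 *		struct pi_desc old, new;
 *
 *		do {
 *			old.control = new.control = READ_ONCE(pi_desc->control);
 *			new.ndst = ndst;
 *			new.nv = nv;
 *		} while (cmpxchg64(&pi_desc->control, old.control,
 *				   new.control) != old.control);
 *	}
 */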
13859
13860 static int vmx_pre_block(struct kvm_vcpu *vcpu)
13861 {
13862         if (pi_pre_block(vcpu))
13863                 return 1;
13864
13865         if (kvm_lapic_hv_timer_in_use(vcpu))
13866                 kvm_lapic_switch_to_sw_timer(vcpu);
13867
13868         return 0;
13869 }
13870
13871 static void pi_post_block(struct kvm_vcpu *vcpu)
13872 {
13873         if (vcpu->pre_pcpu == -1)
13874                 return;
13875
13876         WARN_ON(irqs_disabled());
13877         local_irq_disable();
13878         __pi_post_block(vcpu);
13879         local_irq_enable();
13880 }
13881
13882 static void vmx_post_block(struct kvm_vcpu *vcpu)
13883 {
13884         if (kvm_x86_ops->set_hv_timer)
13885                 kvm_lapic_switch_to_hv_timer(vcpu);
13886
13887         pi_post_block(vcpu);
13888 }
13889
13890 /*
13891  * vmx_update_pi_irte - set IRTE for Posted-Interrupts
13892  *
13893  * @kvm: kvm
13894  * @host_irq: host irq of the interrupt
13895  * @guest_irq: gsi of the interrupt
13896  * @set: set or unset PI
13897  * returns 0 on success, < 0 on failure
13898  */
13899 static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
13900                               uint32_t guest_irq, bool set)
13901 {
13902         struct kvm_kernel_irq_routing_entry *e;
13903         struct kvm_irq_routing_table *irq_rt;
13904         struct kvm_lapic_irq irq;
13905         struct kvm_vcpu *vcpu;
13906         struct vcpu_data vcpu_info;
13907         int idx, ret = 0;
13908
13909         if (!kvm_arch_has_assigned_device(kvm) ||
13910                 !irq_remapping_cap(IRQ_POSTING_CAP) ||
13911                 !kvm_vcpu_apicv_active(kvm->vcpus[0]))
13912                 return 0;
13913
13914         idx = srcu_read_lock(&kvm->irq_srcu);
13915         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
13916         if (guest_irq >= irq_rt->nr_rt_entries ||
13917             hlist_empty(&irq_rt->map[guest_irq])) {
13918                 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
13919                              guest_irq, irq_rt->nr_rt_entries);
13920                 goto out;
13921         }
13922
13923         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
13924                 if (e->type != KVM_IRQ_ROUTING_MSI)
13925                         continue;
13926                 /*
13927                  * VT-d PI cannot post multicast/broadcast interrupts to
13928                  * a vCPU, so interrupt remapping is still used for those
13929                  * kinds of interrupts.
13930                  *
13931                  * For lowest-priority interrupts, only a single-CPU
13932                  * destination is supported, e.g. when user space pins
13933                  * the interrupt via /proc/irq or irqbalance makes it
13934                  * single-CPU.
13935                  *
13936                  * Full lowest-priority delivery may be supported later.
13937                  */
13938
13939                 kvm_set_msi_irq(kvm, e, &irq);
13940                 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
13941                         /*
13942                          * Make sure the IRTE is in remapped mode if
13943                          * we don't handle it in posted mode.
13944                          */
13945                         ret = irq_set_vcpu_affinity(host_irq, NULL);
13946                         if (ret < 0) {
13947                                 printk(KERN_INFO
13948                                    "failed to fall back to remapped mode, irq: %u\n",
13949                                    host_irq);
13950                                 goto out;
13951                         }
13952
13953                         continue;
13954                 }
13955
13956                 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
13957                 vcpu_info.vector = irq.vector;
13958
13959                 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
13960                                 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
13961
13962                 if (set)
13963                         ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
13964                 else
13965                         ret = irq_set_vcpu_affinity(host_irq, NULL);
13966
13967                 if (ret < 0) {
13968                         printk(KERN_INFO "%s: failed to update PI IRTE\n",
13969                                         __func__);
13970                         goto out;
13971                 }
13972         }
13973
13974         ret = 0;
13975 out:
13976         srcu_read_unlock(&kvm->irq_srcu, idx);
13977         return ret;
13978 }
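/*
 * Example flow (illustrative): for an assigned device whose MSI route
 * targets a single vCPU with, say, guest vector 0x38,
 * kvm_intr_is_single_vcpu() resolves that vCPU and the code above
 * effectively does:
 *
 *	vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
 *	vcpu_info.vector       = 0x38;
 *	irq_set_vcpu_affinity(host_irq, &vcpu_info);
 *
 * after which the IOMMU posts the device interrupt directly into that
 * vCPU's PI descriptor and signals POSTED_INTR_VECTOR (or the wakeup
 * vector while the vCPU is blocked) instead of delivering a remapped
 * host interrupt.
 */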
13979
13980 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
13981 {
13982         if (vcpu->arch.mcg_cap & MCG_LMCE_P)
13983                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
13984                         FEATURE_CONTROL_LMCE;
13985         else
13986                 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
13987                         ~FEATURE_CONTROL_LMCE;
13988 }
13989
13990 static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
13991 {
13992         /* we need a nested vmexit to enter SMM, postpone if run is pending */
13993         if (to_vmx(vcpu)->nested.nested_run_pending)
13994                 return 0;
13995         return 1;
13996 }
13997
13998 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
13999 {
14000         struct vcpu_vmx *vmx = to_vmx(vcpu);
14001
14002         vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
14003         if (vmx->nested.smm.guest_mode)
14004                 nested_vmx_vmexit(vcpu, -1, 0, 0);
14005
14006         vmx->nested.smm.vmxon = vmx->nested.vmxon;
14007         vmx->nested.vmxon = false;
14008         vmx_clear_hlt(vcpu);
14009         return 0;
14010 }
14011
14012 static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
14013 {
14014         struct vcpu_vmx *vmx = to_vmx(vcpu);
14015         int ret;
14016
14017         if (vmx->nested.smm.vmxon) {
14018                 vmx->nested.vmxon = true;
14019                 vmx->nested.smm.vmxon = false;
14020         }
14021
14022         if (vmx->nested.smm.guest_mode) {
14023                 vcpu->arch.hflags &= ~HF_SMM_MASK;
14024                 ret = nested_vmx_enter_non_root_mode(vcpu, false);
14025                 vcpu->arch.hflags |= HF_SMM_MASK;
14026                 if (ret)
14027                         return ret;
14028
14029                 vmx->nested.smm.guest_mode = false;
14030         }
14031         return 0;
14032 }
14033
14034 static int enable_smi_window(struct kvm_vcpu *vcpu)
14035 {
14036         return 0;
14037 }
14038
14039 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
14040                                 struct kvm_nested_state __user *user_kvm_nested_state,
14041                                 u32 user_data_size)
14042 {
14043         struct vcpu_vmx *vmx;
14044         struct vmcs12 *vmcs12;
14045         struct kvm_nested_state kvm_state = {
14046                 .flags = 0,
14047                 .format = 0,
14048                 .size = sizeof(kvm_state),
14049                 .vmx.vmxon_pa = -1ull,
14050                 .vmx.vmcs_pa = -1ull,
14051         };
14052
14053         if (!vcpu)
14054                 return kvm_state.size + 2 * VMCS12_SIZE;
14055
14056         vmx = to_vmx(vcpu);
14057         vmcs12 = get_vmcs12(vcpu);
14058         if (nested_vmx_allowed(vcpu) &&
14059             (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
14060                 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
14061                 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
14062
14063                 if (vmx->nested.current_vmptr != -1ull) {
14064                         kvm_state.size += VMCS12_SIZE;
14065
14066                         if (is_guest_mode(vcpu) &&
14067                             nested_cpu_has_shadow_vmcs(vmcs12) &&
14068                             vmcs12->vmcs_link_pointer != -1ull)
14069                                 kvm_state.size += VMCS12_SIZE;
14070                 }
14071
14072                 if (vmx->nested.smm.vmxon)
14073                         kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
14074
14075                 if (vmx->nested.smm.guest_mode)
14076                         kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
14077
14078                 if (is_guest_mode(vcpu)) {
14079                         kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
14080
14081                         if (vmx->nested.nested_run_pending)
14082                                 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
14083                 }
14084         }
14085
14086         if (user_data_size < kvm_state.size)
14087                 goto out;
14088
14089         if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
14090                 return -EFAULT;
14091
14092         if (vmx->nested.current_vmptr == -1ull)
14093                 goto out;
14094
14095         /*
14096          * When running L2, the authoritative vmcs12 state is in the
14097          * vmcs02. When running L1, the authoritative vmcs12 state is
14098          * in the shadow vmcs linked to vmcs01, unless
14099          * sync_shadow_vmcs is set, in which case, the authoritative
14100          * vmcs12 state is in the vmcs12 already.
14101          */
14102         if (is_guest_mode(vcpu))
14103                 sync_vmcs12(vcpu, vmcs12);
14104         else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
14105                 copy_shadow_to_vmcs12(vmx);
14106
14107         if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
14108                 return -EFAULT;
14109
14110         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
14111             vmcs12->vmcs_link_pointer != -1ull) {
14112                 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
14113                                  get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
14114                         return -EFAULT;
14115         }
14116
14117 out:
14118         return kvm_state.size;
14119 }
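/*
 * Illustrative user-space sketch (assumptions: vm_fd and vcpu_fd are
 * open KVM file descriptors, <linux/kvm.h> provides the ioctl numbers,
 * error handling trimmed).  The capability check returns the maximum
 * blob size, i.e. the header plus room for the vmcs12 and a possible
 * shadow vmcs12, matching the NULL-vcpu case above:
 *
 *	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
 *	struct kvm_nested_state *state = calloc(1, max);
 *
 *	state->size = max;
 *	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)
 *		err(1, "KVM_GET_NESTED_STATE");
 *
 * On success, state->data[] holds the vmcs12 (VMCS12_SIZE bytes) and,
 * when a shadow VMCS is in use, the shadow vmcs12 at offset VMCS12_SIZE.
 */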
14120
14121 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
14122                                 struct kvm_nested_state __user *user_kvm_nested_state,
14123                                 struct kvm_nested_state *kvm_state)
14124 {
14125         struct vcpu_vmx *vmx = to_vmx(vcpu);
14126         struct vmcs12 *vmcs12;
14127         u32 exit_qual;
14128         int ret;
14129
14130         if (kvm_state->format != 0)
14131                 return -EINVAL;
14132
14133         if (!nested_vmx_allowed(vcpu))
14134                 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
14135
14136         if (kvm_state->vmx.vmxon_pa == -1ull) {
14137                 if (kvm_state->vmx.smm.flags)
14138                         return -EINVAL;
14139
14140                 if (kvm_state->vmx.vmcs_pa != -1ull)
14141                         return -EINVAL;
14142
14143                 vmx_leave_nested(vcpu);
14144                 return 0;
14145         }
14146
14147         if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
14148                 return -EINVAL;
14149
14150         if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
14151                 return -EINVAL;
14152
14153         if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
14154             !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
14155                 return -EINVAL;
14156
14157         if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
14158             (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
14159                 return -EINVAL;
14160
14161         if (kvm_state->vmx.smm.flags &
14162             ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
14163                 return -EINVAL;
14164
14165         /*
14166          * SMM temporarily disables VMX, so we cannot be in guest mode,
14167          * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
14168          * must be zero.
14169          */
14170         if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
14171                 return -EINVAL;
14172
14173         if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
14174             !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
14175                 return -EINVAL;
14176
14177         vmx_leave_nested(vcpu);
14178         if (kvm_state->vmx.vmxon_pa == -1ull)
14179                 return 0;
14180
14181         vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
14182         ret = enter_vmx_operation(vcpu);
14183         if (ret)
14184                 return ret;
14185
14186         set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
14187
14188         if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
14189                 vmx->nested.smm.vmxon = true;
14190                 vmx->nested.vmxon = false;
14191
14192                 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
14193                         vmx->nested.smm.guest_mode = true;
14194         }
14195
14196         vmcs12 = get_vmcs12(vcpu);
14197         if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
14198                 return -EFAULT;
14199
14200         if (vmcs12->hdr.revision_id != VMCS12_REVISION)
14201                 return -EINVAL;
14202
14203         if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
14204                 return 0;
14205
14206         vmx->nested.nested_run_pending =
14207                 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
14208
14209         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
14210             vmcs12->vmcs_link_pointer != -1ull) {
14211                 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
14212                 if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
14213                         return -EINVAL;
14214
14215                 if (copy_from_user(shadow_vmcs12,
14216                                    user_kvm_nested_state->data + VMCS12_SIZE,
14217                                    sizeof(*vmcs12)))
14218                         return -EFAULT;
14219
14220                 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
14221                     !shadow_vmcs12->hdr.shadow_vmcs)
14222                         return -EINVAL;
14223         }
14224
14225         if (check_vmentry_prereqs(vcpu, vmcs12) ||
14226             check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
14227                 return -EINVAL;
14228
14229         vmx->nested.dirty_vmcs12 = true;
14230         ret = nested_vmx_enter_non_root_mode(vcpu, false);
14231         if (ret)
14232                 return -EINVAL;
14233
14234         return 0;
14235 }
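/*
 * Illustrative restore-side sketch (same assumptions as the
 * KVM_GET_NESTED_STATE example above): user space replays the saved
 * blob after vCPU creation and KVM_SET_CPUID2 (so nested_vmx_allowed()
 * sees VMX in the guest CPUID) and before the first KVM_RUN:
 *
 *	if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0)
 *		err(1, "KVM_SET_NESTED_STATE");
 *
 * As the checks above show, a blob with vmxon_pa == -1 simply forces
 * the vCPU out of nested operation, while one carrying
 * KVM_STATE_NESTED_GUEST_MODE re-enters non-root mode via
 * nested_vmx_enter_non_root_mode().
 */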
14236
14237 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
14238         .cpu_has_kvm_support = cpu_has_kvm_support,
14239         .disabled_by_bios = vmx_disabled_by_bios,
14240         .hardware_setup = hardware_setup,
14241         .hardware_unsetup = hardware_unsetup,
14242         .check_processor_compatibility = vmx_check_processor_compat,
14243         .hardware_enable = hardware_enable,
14244         .hardware_disable = hardware_disable,
14245         .cpu_has_accelerated_tpr = report_flexpriority,
14246         .has_emulated_msr = vmx_has_emulated_msr,
14247
14248         .vm_init = vmx_vm_init,
14249         .vm_alloc = vmx_vm_alloc,
14250         .vm_free = vmx_vm_free,
14251
14252         .vcpu_create = vmx_create_vcpu,
14253         .vcpu_free = vmx_free_vcpu,
14254         .vcpu_reset = vmx_vcpu_reset,
14255
14256         .prepare_guest_switch = vmx_prepare_switch_to_guest,
14257         .vcpu_load = vmx_vcpu_load,
14258         .vcpu_put = vmx_vcpu_put,
14259
14260         .update_bp_intercept = update_exception_bitmap,
14261         .get_msr_feature = vmx_get_msr_feature,
14262         .get_msr = vmx_get_msr,
14263         .set_msr = vmx_set_msr,
14264         .get_segment_base = vmx_get_segment_base,
14265         .get_segment = vmx_get_segment,
14266         .set_segment = vmx_set_segment,
14267         .get_cpl = vmx_get_cpl,
14268         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
14269         .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
14270         .decache_cr3 = vmx_decache_cr3,
14271         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
14272         .set_cr0 = vmx_set_cr0,
14273         .set_cr3 = vmx_set_cr3,
14274         .set_cr4 = vmx_set_cr4,
14275         .set_efer = vmx_set_efer,
14276         .get_idt = vmx_get_idt,
14277         .set_idt = vmx_set_idt,
14278         .get_gdt = vmx_get_gdt,
14279         .set_gdt = vmx_set_gdt,
14280         .get_dr6 = vmx_get_dr6,
14281         .set_dr6 = vmx_set_dr6,
14282         .set_dr7 = vmx_set_dr7,
14283         .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
14284         .cache_reg = vmx_cache_reg,
14285         .get_rflags = vmx_get_rflags,
14286         .set_rflags = vmx_set_rflags,
14287
14288         .tlb_flush = vmx_flush_tlb,
14289         .tlb_flush_gva = vmx_flush_tlb_gva,
14290
14291         .run = vmx_vcpu_run,
14292         .handle_exit = vmx_handle_exit,
14293         .skip_emulated_instruction = skip_emulated_instruction,
14294         .set_interrupt_shadow = vmx_set_interrupt_shadow,
14295         .get_interrupt_shadow = vmx_get_interrupt_shadow,
14296         .patch_hypercall = vmx_patch_hypercall,
14297         .set_irq = vmx_inject_irq,
14298         .set_nmi = vmx_inject_nmi,
14299         .queue_exception = vmx_queue_exception,
14300         .cancel_injection = vmx_cancel_injection,
14301         .interrupt_allowed = vmx_interrupt_allowed,
14302         .nmi_allowed = vmx_nmi_allowed,
14303         .get_nmi_mask = vmx_get_nmi_mask,
14304         .set_nmi_mask = vmx_set_nmi_mask,
14305         .enable_nmi_window = enable_nmi_window,
14306         .enable_irq_window = enable_irq_window,
14307         .update_cr8_intercept = update_cr8_intercept,
14308         .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
14309         .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
14310         .get_enable_apicv = vmx_get_enable_apicv,
14311         .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
14312         .load_eoi_exitmap = vmx_load_eoi_exitmap,
14313         .apicv_post_state_restore = vmx_apicv_post_state_restore,
14314         .hwapic_irr_update = vmx_hwapic_irr_update,
14315         .hwapic_isr_update = vmx_hwapic_isr_update,
14316         .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
14317         .sync_pir_to_irr = vmx_sync_pir_to_irr,
14318         .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
14319
14320         .set_tss_addr = vmx_set_tss_addr,
14321         .set_identity_map_addr = vmx_set_identity_map_addr,
14322         .get_tdp_level = get_ept_level,
14323         .get_mt_mask = vmx_get_mt_mask,
14324
14325         .get_exit_info = vmx_get_exit_info,
14326
14327         .get_lpage_level = vmx_get_lpage_level,
14328
14329         .cpuid_update = vmx_cpuid_update,
14330
14331         .rdtscp_supported = vmx_rdtscp_supported,
14332         .invpcid_supported = vmx_invpcid_supported,
14333
14334         .set_supported_cpuid = vmx_set_supported_cpuid,
14335
14336         .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
14337
14338         .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
14339         .write_tsc_offset = vmx_write_tsc_offset,
14340
14341         .set_tdp_cr3 = vmx_set_cr3,
14342
14343         .check_intercept = vmx_check_intercept,
14344         .handle_external_intr = vmx_handle_external_intr,
14345         .mpx_supported = vmx_mpx_supported,
14346         .xsaves_supported = vmx_xsaves_supported,
14347         .umip_emulated = vmx_umip_emulated,
14348
14349         .check_nested_events = vmx_check_nested_events,
14350         .request_immediate_exit = vmx_request_immediate_exit,
14351
14352         .sched_in = vmx_sched_in,
14353
14354         .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
14355         .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
14356         .flush_log_dirty = vmx_flush_log_dirty,
14357         .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
14358         .write_log_dirty = vmx_write_pml_buffer,
14359
14360         .pre_block = vmx_pre_block,
14361         .post_block = vmx_post_block,
14362
14363         .pmu_ops = &intel_pmu_ops,
14364
14365         .update_pi_irte = vmx_update_pi_irte,
14366
14367 #ifdef CONFIG_X86_64
14368         .set_hv_timer = vmx_set_hv_timer,
14369         .cancel_hv_timer = vmx_cancel_hv_timer,
14370 #endif
14371
14372         .setup_mce = vmx_setup_mce,
14373
14374         .get_nested_state = vmx_get_nested_state,
14375         .set_nested_state = vmx_set_nested_state,
14376         .get_vmcs12_pages = nested_get_vmcs12_pages,
14377
14378         .smi_allowed = vmx_smi_allowed,
14379         .pre_enter_smm = vmx_pre_enter_smm,
14380         .pre_leave_smm = vmx_pre_leave_smm,
14381         .enable_smi_window = enable_smi_window,
14382 };
14383
14384 static void vmx_cleanup_l1d_flush(void)
14385 {
14386         if (vmx_l1d_flush_pages) {
14387                 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
14388                 vmx_l1d_flush_pages = NULL;
14389         }
14390         /* Restore state so sysfs ignores VMX */
14391         l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
14392 }
14393
14394 static void vmx_exit(void)
14395 {
14396 #ifdef CONFIG_KEXEC_CORE
14397         RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
14398         synchronize_rcu();
14399 #endif
14400
14401         kvm_exit();
14402
14403 #if IS_ENABLED(CONFIG_HYPERV)
14404         if (static_branch_unlikely(&enable_evmcs)) {
14405                 int cpu;
14406                 struct hv_vp_assist_page *vp_ap;
14407                 /*
14408                  * Reset everything to support using non-enlightened VMCS
14409                  * access later (e.g. when we reload the module with
14410                  * enlightened_vmcs=0)
14411                  */
14412                 for_each_online_cpu(cpu) {
14413                         vp_ap = hv_get_vp_assist_page(cpu);
14414
14415                         if (!vp_ap)
14416                                 continue;
14417
14418                         vp_ap->current_nested_vmcs = 0;
14419                         vp_ap->enlighten_vmentry = 0;
14420                 }
14421
14422                 static_branch_disable(&enable_evmcs);
14423         }
14424 #endif
14425         vmx_cleanup_l1d_flush();
14426 }
14427 module_exit(vmx_exit);
14428
14429 static int __init vmx_init(void)
14430 {
14431         int r;
14432
14433 #if IS_ENABLED(CONFIG_HYPERV)
14434         /*
14435          * Use the enlightened VMCS only if Hyper-V recommends it and the
14436          * host supports eVMCS v1 or above.  eVMCS support can also be
14437          * disabled with the 'enlightened_vmcs' module parameter.
14438          */
14439         if (enlightened_vmcs &&
14440             ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
14441             (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
14442             KVM_EVMCS_VERSION) {
14443                 int cpu;
14444
14445                 /* Check that we have assist pages on all online CPUs */
14446                 for_each_online_cpu(cpu) {
14447                         if (!hv_get_vp_assist_page(cpu)) {
14448                                 enlightened_vmcs = false;
14449                                 break;
14450                         }
14451                 }
14452
14453                 if (enlightened_vmcs) {
14454                         pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
14455                         static_branch_enable(&enable_evmcs);
14456                 }
14457         } else {
14458                 enlightened_vmcs = false;
14459         }
14460 #endif
14461
14462         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
14463                      __alignof__(struct vcpu_vmx), THIS_MODULE);
14464         if (r)
14465                 return r;
14466
14467         /*
14468          * Must be called after kvm_init() so enable_ept is properly set
14469          * up.  Hand in the mitigation mode stored by the pre-module-init
14470          * parameter parser.  If no parameter was given, it contains
14471          * 'auto', which is turned into the default 'cond' mitigation
14472          * mode.
14473          */
14474         if (boot_cpu_has(X86_BUG_L1TF)) {
14475                 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
14476                 if (r) {
14477                         vmx_exit();
14478                         return r;
14479                 }
14480         }
14481
14482 #ifdef CONFIG_KEXEC_CORE
14483         rcu_assign_pointer(crash_vmclear_loaded_vmcss,
14484                            crash_vmclear_local_loaded_vmcss);
14485 #endif
14486         vmx_check_vmcs12_offsets();
14487
14488         return 0;
14489 }
14490 module_init(vmx_init);
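/*
 * Usage note (illustrative): the decisions made in vmx_init() can be
 * influenced through module parameters referenced above, for example
 * disabling enlightened VMCS probing and forcing conditional L1D
 * flushing:
 *
 *	# modprobe kvm_intel enlightened_vmcs=0 vmentry_l1d_flush=cond
 *
 * where vmentry_l1d_flush selects the mitigation mode handed to
 * vmx_setup_l1d_flush() and enlightened_vmcs gates the Hyper-V eVMCS
 * probing above.
 */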