/*
 * Xen HVM emulation support in KVM
 *
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm_int.h"
#include "sysemu/kvm_xen.h"
#include "kvm/kvm_i386.h"
#include "exec/address-spaces.h"
#include "xen-emu.h"
#include "trace.h"
#include "sysemu/runstate.h"

#include "hw/pci/msi.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/kvm/xen_overlay.h"
#include "hw/i386/kvm/xen_evtchn.h"

#include "hw/xen/interface/version.h"
#include "hw/xen/interface/sched.h"
#include "hw/xen/interface/memory.h"
#include "hw/xen/interface/hvm/hvm_op.h"
#include "hw/xen/interface/hvm/params.h"
#include "hw/xen/interface/vcpu.h"
#include "hw/xen/interface/event_channel.h"

#include "xen-compat.h"
#ifdef TARGET_X86_64
#define hypercall_compat32(longmode) (!(longmode))
#else
#define hypercall_compat32(longmode) (false)
#endif
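
/*
 * Translate a guest virtual address to a guest physical address using
 * the KVM_TRANSLATE ioctl, optionally returning the number of bytes
 * from the GVA to the end of its page.
 */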
static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
                           size_t *len, bool is_write)
{
    struct kvm_translation tr = {
        .linear_address = gva,
    };

    if (len) {
        *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK);
    }

    if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid ||
        (is_write && !tr.writeable)) {
        return false;
    }
    *gpa = tr.physical_address;
    return true;
}
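
/*
 * Copy between QEMU and a buffer at a guest virtual address. The
 * GVA→GPA translation is redone for every page, since the buffer may
 * cross page boundaries and need not be physically contiguous.
 */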
static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
                      bool is_write)
{
    uint8_t *buf = (uint8_t *)_buf;
    uint64_t gpa;
    size_t len;

    while (sz) {
        if (!kvm_gva_to_gpa(cs, gva, &gpa, &len, is_write)) {
            return -EFAULT;
        }
        if (len > sz) {
            len = sz;
        }

        cpu_physical_memory_rw(gpa, buf, len, is_write);

        buf += len;
        sz -= len;
        gva += len;
    }

    return 0;
}
static inline int kvm_copy_from_gva(CPUState *cs, uint64_t gva, void *buf,
                                    size_t sz)
{
    return kvm_gva_rw(cs, gva, buf, sz, false);
}
static inline int kvm_copy_to_gva(CPUState *cs, uint64_t gva, void *buf,
                                  size_t sz)
{
    return kvm_gva_rw(cs, gva, buf, sz, true);
}
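
/*
 * Called when KVM's Xen HVM mode is enabled: verify that the kernel
 * offers the capabilities we need, then program the hypercall MSR so
 * that hypercalls are intercepted and forwarded to userspace.
 */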
int kvm_xen_init(KVMState *s, uint32_t hypercall_msr)
{
    const int required_caps = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
        KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | KVM_XEN_HVM_CONFIG_SHARED_INFO;
    struct kvm_xen_hvm_config cfg = {
        .msr = hypercall_msr,
        .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
    };
    int xen_caps, ret;

    xen_caps = kvm_check_extension(s, KVM_CAP_XEN_HVM);
    if (required_caps & ~xen_caps) {
        error_report("kvm: Xen HVM guest support not present or insufficient");
        return -ENOSYS;
    }

    if (xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND) {
        struct kvm_xen_hvm_attr ha = {
            .type = KVM_XEN_ATTR_TYPE_XEN_VERSION,
            .u.xen_version = s->xen_version,
        };
        (void)kvm_vm_ioctl(s, KVM_XEN_HVM_SET_ATTR, &ha);

        cfg.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
    }

    ret = kvm_vm_ioctl(s, KVM_XEN_HVM_CONFIG, &cfg);
    if (ret < 0) {
        error_report("kvm: Failed to enable Xen HVM support: %s",
                     strerror(-ret));
        return ret;
    }

    s->xen_caps = xen_caps;
    return 0;
}
int kvm_xen_init_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int err;

    /*
     * The kernel needs to know the Xen/ACPI vCPU ID because that's
     * what the guest uses in hypercalls such as timers. It doesn't
     * match the APIC ID which is generally used for talking to the
     * kernel about vCPUs. And if vCPU threads race with creating
     * their KVM vCPUs out of order, it doesn't necessarily match
     * with the kernel's internal vCPU indices either.
     */
    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        struct kvm_xen_vcpu_attr va = {
            .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID,
            .u.vcpu_id = cs->cpu_index,
        };
        err = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va);
        if (err) {
            error_report("kvm: Failed to set Xen vCPU ID attribute: %s",
                         strerror(-err));
            return err;
        }
    }

    env->xen_vcpu_info_gpa = INVALID_GPA;
    env->xen_vcpu_info_default_gpa = INVALID_GPA;
    env->xen_vcpu_time_info_gpa = INVALID_GPA;
    env->xen_vcpu_runstate_gpa = INVALID_GPA;

    return 0;
}
uint32_t kvm_xen_get_caps(void)
{
    return kvm_state->xen_caps;
}
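
/*
 * Only XENVER_get_features is handled here; any other xen_version
 * subop falls through and is reported to the guest as unimplemented.
 */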
static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
                                      int cmd, uint64_t arg)
{
    int err = 0;

    switch (cmd) {
    case XENVER_get_features: {
        struct xen_feature_info fi;

        /* No need for 32/64 compat handling */
        qemu_build_assert(sizeof(fi) == 8);

        err = kvm_copy_from_gva(CPU(cpu), arg, &fi, sizeof(fi));
        if (err) {
            break;
        }

        fi.submap = 0;
        if (fi.submap_idx == 0) {
            fi.submap |= 1 << XENFEAT_writable_page_tables |
                         1 << XENFEAT_writable_descriptor_tables |
                         1 << XENFEAT_auto_translated_physmap |
                         1 << XENFEAT_supervisor_mode_kernel |
                         1 << XENFEAT_hvm_callback_vector;
        }

        err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
        break;
    }

    default:
        return false;
    }

    exit->u.hcall.result = err;
    return true;
}
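
/* Set a single GPA-valued per-vCPU attribute (vcpu_info, time info, etc.). */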
static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
{
    struct kvm_xen_vcpu_attr xhsi;

    xhsi.type = type;
    xhsi.u.gpa = gpa;

    trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);

    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
}
static int kvm_xen_set_vcpu_callback_vector(CPUState *cs)
{
    uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
    struct kvm_xen_vcpu_attr xva;

    xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
    xva.u.vector = vector;

    trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector);

    /* This is a per-vCPU attribute, so use the vCPU ioctl for it. */
    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xva);
}
static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_callback_vector = data.host_int;

    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        kvm_xen_set_vcpu_callback_vector(cs);
    }
}
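
/*
 * Tell KVM where this vCPU's vcpu_info lives, and also cache a host
 * virtual address for it so that the event channel code in QEMU can
 * write to it directly.
 */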
static int set_vcpu_info(CPUState *cs, uint64_t gpa)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemoryRegionSection mrs = { .mr = NULL };
    void *vcpu_info_hva = NULL;
    int ret;

    ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);
    if (ret || gpa == INVALID_GPA) {
        goto out;
    }

    mrs = memory_region_find(get_system_memory(), gpa,
                             sizeof(struct vcpu_info));
    if (mrs.mr && mrs.mr->ram_block &&
        !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) {
        vcpu_info_hva = qemu_map_ram_ptr(mrs.mr->ram_block,
                                         mrs.offset_within_region);
    }
    if (!vcpu_info_hva) {
        if (mrs.mr) {
            memory_region_unref(mrs.mr);
            mrs.mr = NULL;
        }
        ret = -EINVAL;
    }

 out:
    if (env->xen_vcpu_info_mr) {
        memory_region_unref(env->xen_vcpu_info_mr);
    }
    env->xen_vcpu_info_hva = vcpu_info_hva;
    env->xen_vcpu_info_mr = mrs.mr;
    return ret;
}
static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_info_default_gpa = data.host_ulong;

    /* Changing the default does nothing if a vcpu_info was explicitly set. */
    if (env->xen_vcpu_info_gpa == INVALID_GPA) {
        set_vcpu_info(cs, env->xen_vcpu_info_default_gpa);
    }
}
static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_info_gpa = data.host_ulong;

    set_vcpu_info(cs, env->xen_vcpu_info_gpa);
}
void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
{
    CPUState *cs = qemu_get_cpu(vcpu_id);

    if (!cs) {
        return NULL;
    }

    return X86_CPU(cs)->env.xen_vcpu_info_hva;
}
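
/*
 * Deliver the per-vCPU callback as an MSI if an upcall vector has been
 * registered; otherwise, for HVM_PARAM_CALLBACK_TYPE_VECTOR, just kick
 * the vCPU so that KVM injects the pending vector on guest entry.
 */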
void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
{
    CPUState *cs = qemu_get_cpu(vcpu_id);
    uint8_t vector;

    if (!cs) {
        return;
    }

    vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
    if (vector) {
        /*
         * The per-vCPU callback vector is injected via the lapic. Just
         * deliver it as an MSI.
         */
        MSIMessage msg = {
            .address = APIC_DEFAULT_ADDRESS | X86_CPU(cs)->apic_id,
            .data = vector | (1UL << MSI_DATA_LEVEL_SHIFT),
        };
        kvm_irqchip_send_msi(kvm_state, msg);
        return;
    }

    switch (type) {
    case HVM_PARAM_CALLBACK_TYPE_VECTOR:
        /*
         * If the evtchn_upcall_pending field in the vcpu_info is set, then
         * KVM will automatically deliver the vector on entering the vCPU,
         * so all we have to do is kick the vCPU out of any halt state.
         */
        qemu_cpu_kick(cs);
        break;
    }
}
static int kvm_xen_set_vcpu_timer(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    struct kvm_xen_vcpu_attr va = {
        .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
        .u.timer.port = env->xen_virq[VIRQ_TIMER],
        .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
        .u.timer.expires_ns = env->xen_singleshot_timer_ns,
    };

    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va);
}
static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data)
{
    kvm_xen_set_vcpu_timer(cs);
}
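
/*
 * Record the event channel port bound to a VIRQ on this vCPU. The timer
 * VIRQ is additionally pushed to the kernel, which accelerates the Xen
 * timer when KVM_XEN_HVM_CONFIG_EVTCHN_SEND support is present.
 */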
int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port)
{
    CPUState *cs = qemu_get_cpu(vcpu_id);

    if (!cs) {
        return -ENOENT;
    }

    /* cpu.h doesn't include the actual Xen header. */
    qemu_build_assert(NR_VIRQS == XEN_NR_VIRQS);

    if (virq >= NR_VIRQS) {
        return -EINVAL;
    }

    if (port && X86_CPU(cs)->env.xen_virq[virq]) {
        return -EEXIST;
    }

    X86_CPU(cs)->env.xen_virq[virq] = port;
    if (virq == VIRQ_TIMER && kvm_xen_has_cap(EVTCHN_SEND)) {
        async_run_on_cpu(cs, do_set_vcpu_timer_virq,
                         RUN_ON_CPU_HOST_INT(port));
    }

    return 0;
}
static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_time_info_gpa = data.host_ulong;

    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
                          env->xen_vcpu_time_info_gpa);
}
static void do_set_vcpu_runstate_gpa(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_runstate_gpa = data.host_ulong;

    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
                          env->xen_vcpu_runstate_gpa);
}
static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->xen_vcpu_info_gpa = INVALID_GPA;
    env->xen_vcpu_info_default_gpa = INVALID_GPA;
    env->xen_vcpu_time_info_gpa = INVALID_GPA;
    env->xen_vcpu_runstate_gpa = INVALID_GPA;
    env->xen_vcpu_callback_vector = 0;
    env->xen_singleshot_timer_ns = 0;
    memset(env->xen_virq, 0, sizeof(env->xen_virq));

    set_vcpu_info(cs, INVALID_GPA);
    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
                          INVALID_GPA);
    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
                          INVALID_GPA);
    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        kvm_xen_set_vcpu_callback_vector(cs);
        kvm_xen_set_vcpu_timer(cs);
    }
}
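
/*
 * The shared_info page contains an embedded vcpu_info for each of the
 * first XEN_LEGACY_MAX_VCPUS vCPUs, so mapping it also establishes the
 * default vcpu_info location for those vCPUs.
 */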
static int xen_set_shared_info(uint64_t gfn)
{
    uint64_t gpa = gfn << TARGET_PAGE_BITS;
    int i, err;

    QEMU_IOTHREAD_LOCK_GUARD();

    /*
     * The xen_overlay device tells KVM about it too, since it had to
     * do that on migration load anyway (unless we're going to jump
     * through lots of hoops to maintain the fiction that this isn't
     * KVM).
     */
    err = xen_overlay_map_shinfo_page(gpa);
    if (err) {
        return err;
    }

    trace_kvm_xen_set_shared_info(gfn);

    for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
        CPUState *cpu = qemu_get_cpu(i);
        if (cpu) {
            async_run_on_cpu(cpu, do_set_vcpu_info_default_gpa,
                             RUN_ON_CPU_HOST_ULONG(gpa));
        }
        gpa += sizeof(vcpu_info_t);
    }

    return err;
}
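
/*
 * XENMEM_add_to_physmap: only mapping of the shared_info page is
 * supported here; grant table and foreign/MMIO spaces are rejected.
 */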
static int add_to_physmap_one(uint32_t space, uint64_t idx, uint64_t gfn)
{
    switch (space) {
    case XENMAPSPACE_shared_info:
        if (idx > 0) {
            return -EINVAL;
        }
        return xen_set_shared_info(gfn);

    case XENMAPSPACE_grant_table:
    case XENMAPSPACE_gmfn:
    case XENMAPSPACE_gmfn_range:
        return -ENOTSUP;

    case XENMAPSPACE_gmfn_foreign:
    case XENMAPSPACE_dev_mmio:
        return -EPERM;

    default:
        return -EINVAL;
    }
}
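
/*
 * The 32-bit ABI uses different structure layouts and pointer sizes,
 * so when the guest is not in long mode the argument is unmarshalled
 * through the compat_* definitions from xen-compat.h.
 */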
static int do_add_to_physmap(struct kvm_xen_exit *exit, X86CPU *cpu,
                             uint64_t arg)
{
    struct xen_add_to_physmap xatp;
    CPUState *cs = CPU(cpu);

    if (hypercall_compat32(exit->u.hcall.longmode)) {
        struct compat_xen_add_to_physmap xatp32;

        qemu_build_assert(sizeof(struct compat_xen_add_to_physmap) == 16);
        if (kvm_copy_from_gva(cs, arg, &xatp32, sizeof(xatp32))) {
            return -EFAULT;
        }
        xatp.domid = xatp32.domid;
        xatp.size = xatp32.size;
        xatp.space = xatp32.space;
        xatp.idx = xatp32.idx;
        xatp.gpfn = xatp32.gpfn;
    } else {
        if (kvm_copy_from_gva(cs, arg, &xatp, sizeof(xatp))) {
            return -EFAULT;
        }
    }

    if (xatp.domid != DOMID_SELF && xatp.domid != xen_domid) {
        return -ESRCH;
    }

    return add_to_physmap_one(xatp.space, xatp.idx, xatp.gpfn);
}
static int do_add_to_physmap_batch(struct kvm_xen_exit *exit, X86CPU *cpu,
                                   uint64_t arg)
{
    struct xen_add_to_physmap_batch xatpb;
    unsigned long idxs_gva, gpfns_gva, errs_gva;
    CPUState *cs = CPU(cpu);
    size_t op_sz;

    if (hypercall_compat32(exit->u.hcall.longmode)) {
        struct compat_xen_add_to_physmap_batch xatpb32;

        qemu_build_assert(sizeof(struct compat_xen_add_to_physmap_batch) == 20);
        if (kvm_copy_from_gva(cs, arg, &xatpb32, sizeof(xatpb32))) {
            return -EFAULT;
        }
        xatpb.domid = xatpb32.domid;
        xatpb.space = xatpb32.space;
        xatpb.size = xatpb32.size;

        idxs_gva = xatpb32.idxs.c;
        gpfns_gva = xatpb32.gpfns.c;
        errs_gva = xatpb32.errs.c;
        op_sz = sizeof(uint32_t);
    } else {
        if (kvm_copy_from_gva(cs, arg, &xatpb, sizeof(xatpb))) {
            return -EFAULT;
        }
        op_sz = sizeof(unsigned long);
        idxs_gva = (unsigned long)xatpb.idxs.p;
        gpfns_gva = (unsigned long)xatpb.gpfns.p;
        errs_gva = (unsigned long)xatpb.errs.p;
    }

    if (xatpb.domid != DOMID_SELF && xatpb.domid != xen_domid) {
        return -ESRCH;
    }

    /* Explicitly invalid for the batch op. Not that we implement it anyway. */
    if (xatpb.space == XENMAPSPACE_gmfn_range) {
        return -EINVAL;
    }

    while (xatpb.size--) {
        unsigned long idx = 0;
        unsigned long gpfn = 0;
        int err;

        /* For 32-bit compat this only copies the low 32 bits of each */
        if (kvm_copy_from_gva(cs, idxs_gva, &idx, op_sz) ||
            kvm_copy_from_gva(cs, gpfns_gva, &gpfn, op_sz)) {
            return -EFAULT;
        }
        idxs_gva += op_sz;
        gpfns_gva += op_sz;

        err = add_to_physmap_one(xatpb.space, idx, gpfn);

        if (kvm_copy_to_gva(cs, errs_gva, &err, sizeof(err))) {
            return -EFAULT;
        }
        errs_gva += sizeof(err);
    }
    return 0;
}
static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                    int cmd, uint64_t arg)
{
    int err;

    switch (cmd) {
    case XENMEM_add_to_physmap:
        err = do_add_to_physmap(exit, cpu, arg);
        break;

    case XENMEM_add_to_physmap_batch:
        err = do_add_to_physmap_batch(exit, cpu, arg);
        break;

    default:
        return false;
    }

    exit->u.hcall.result = err;
    return true;
}
static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu,
                             uint64_t arg)
{
    CPUState *cs = CPU(cpu);
    struct xen_hvm_param hp;
    int err = 0;

    /* No need for 32/64 compat handling */
    qemu_build_assert(sizeof(hp) == 16);

    if (kvm_copy_from_gva(cs, arg, &hp, sizeof(hp))) {
        err = -EFAULT;
        goto out;
    }

    if (hp.domid != DOMID_SELF && hp.domid != xen_domid) {
        err = -ESRCH;
        goto out;
    }

    switch (hp.index) {
    case HVM_PARAM_CALLBACK_IRQ:
        err = xen_evtchn_set_callback_param(hp.value);
        xen_set_long_mode(exit->u.hcall.longmode);
        break;
    default:
        return false;
    }

out:
    exit->u.hcall.result = err;
    return true;
}
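
/*
 * HVMOP_set_evtchn_upcall_vector registers an upcall vector for the
 * *target* vCPU, so the update is performed on that vCPU's own thread
 * via async_run_on_cpu().
 */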
static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
                                              X86CPU *cpu, uint64_t arg)
{
    struct xen_hvm_evtchn_upcall_vector up;
    CPUState *target_cs;

    /* No need for 32/64 compat handling */
    qemu_build_assert(sizeof(up) == 8);

    if (kvm_copy_from_gva(CPU(cpu), arg, &up, sizeof(up))) {
        return -EFAULT;
    }

    if (up.vector < 0x10) {
        return -EINVAL;
    }

    target_cs = qemu_get_cpu(up.vcpu);
    if (!target_cs) {
        return -EINVAL;
    }

    async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
                     RUN_ON_CPU_HOST_INT(up.vector));
    return 0;
}
static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                 int cmd, uint64_t arg)
{
    int ret = -ENOSYS;

    switch (cmd) {
    case HVMOP_set_evtchn_upcall_vector:
        /* The argument struct pointer is the second hypercall parameter. */
        ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu, arg);
        break;

    case HVMOP_pagetable_dying:
        ret = -ENOSYS;
        break;

    case HVMOP_set_param:
        return handle_set_param(exit, cpu, arg);

    default:
        return false;
    }

    exit->u.hcall.result = ret;
    return true;
}
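
/*
 * VCPUOP_register_vcpu_info: an explicitly registered vcpu_info takes
 * precedence over the default one embedded in the shared_info page.
 */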
static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
                                     uint64_t arg)
{
    struct vcpu_register_vcpu_info rvi;
    uint64_t gpa;

    /* No need for 32/64 compat handling */
    qemu_build_assert(sizeof(rvi) == 16);
    qemu_build_assert(sizeof(struct vcpu_info) == 64);

    if (!target) {
        return -ENOENT;
    }

    if (kvm_copy_from_gva(cs, arg, &rvi, sizeof(rvi))) {
        return -EFAULT;
    }

    if (rvi.offset > TARGET_PAGE_SIZE - sizeof(struct vcpu_info)) {
        return -EINVAL;
    }

    gpa = ((rvi.mfn << TARGET_PAGE_BITS) + rvi.offset);
    async_run_on_cpu(target, do_set_vcpu_info_gpa, RUN_ON_CPU_HOST_ULONG(gpa));
    return 0;
}
static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target,
                                          uint64_t arg)
{
    struct vcpu_register_time_memory_area tma;
    uint64_t gpa;
    size_t len;

    /* No need for 32/64 compat handling */
    qemu_build_assert(sizeof(tma) == 8);
    qemu_build_assert(sizeof(struct vcpu_time_info) == 32);

    if (!target) {
        return -ENOENT;
    }

    if (kvm_copy_from_gva(cs, arg, &tma, sizeof(tma))) {
        return -EFAULT;
    }

    /*
     * Xen actually uses the GVA and does the translation through the guest
     * page tables each time. But Linux/KVM uses the GPA, on the assumption
     * that guests only ever use *global* addresses (kernel virtual addresses)
     * for it. If Linux is changed to redo the GVA→GPA translation each time,
     * it will offer a new vCPU attribute for that, and we'll use it instead.
     */
    if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) ||
        len < sizeof(struct vcpu_time_info)) {
        return -EFAULT;
    }

    async_run_on_cpu(target, do_set_vcpu_time_info_gpa,
                     RUN_ON_CPU_HOST_ULONG(gpa));
    return 0;
}
static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target,
                                         uint64_t arg)
{
    struct vcpu_register_runstate_memory_area rma;
    uint64_t gpa;
    size_t len;

    /* No need for 32/64 compat handling */
    qemu_build_assert(sizeof(rma) == 8);
    /* The runstate area actually does change size, but Linux copes. */

    if (!target) {
        return -ENOENT;
    }

    if (kvm_copy_from_gva(cs, arg, &rma, sizeof(rma))) {
        return -EFAULT;
    }

    /* As with vcpu_time_info, Xen actually uses the GVA but KVM doesn't. */
    if (!kvm_gva_to_gpa(cs, rma.addr.p, &gpa, &len, false)) {
        return -EFAULT;
    }

    async_run_on_cpu(target, do_set_vcpu_runstate_gpa,
                     RUN_ON_CPU_HOST_ULONG(gpa));
    return 0;
}
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                  int cmd, int vcpu_id, uint64_t arg)
{
    CPUState *dest = qemu_get_cpu(vcpu_id);
    CPUState *cs = CPU(cpu);
    int err;

    switch (cmd) {
    case VCPUOP_register_runstate_memory_area:
        err = vcpuop_register_runstate_info(cs, dest, arg);
        break;
    case VCPUOP_register_vcpu_time_memory_area:
        err = vcpuop_register_vcpu_time_info(cs, dest, arg);
        break;
    case VCPUOP_register_vcpu_info:
        err = vcpuop_register_vcpu_info(cs, dest, arg);
        break;

    default:
        return false;
    }

    exit->u.hcall.result = err;
    return true;
}
static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                    int cmd, uint64_t arg)
{
    CPUState *cs = CPU(cpu);
    int err = -ENOSYS;

    switch (cmd) {
    case EVTCHNOP_init_control:
    case EVTCHNOP_expand_array:
    case EVTCHNOP_set_priority:
        /* We do not support FIFO channels at this point */
        err = -ENOSYS;
        break;

    case EVTCHNOP_status: {
        struct evtchn_status status;

        qemu_build_assert(sizeof(status) == 24);
        if (kvm_copy_from_gva(cs, arg, &status, sizeof(status))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_status_op(&status);
        if (!err && kvm_copy_to_gva(cs, arg, &status, sizeof(status))) {
            err = -EFAULT;
        }
        break;
    }
    case EVTCHNOP_close: {
        struct evtchn_close close;

        qemu_build_assert(sizeof(close) == 4);
        if (kvm_copy_from_gva(cs, arg, &close, sizeof(close))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_close_op(&close);
        break;
    }
    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;

        qemu_build_assert(sizeof(unmask) == 4);
        if (kvm_copy_from_gva(cs, arg, &unmask, sizeof(unmask))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_unmask_op(&unmask);
        break;
    }
    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq virq;

        qemu_build_assert(sizeof(virq) == 12);
        if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_bind_virq_op(&virq);
        if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) {
            err = -EFAULT;
        }
        break;
    }
    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi ipi;

        qemu_build_assert(sizeof(ipi) == 8);
        if (kvm_copy_from_gva(cs, arg, &ipi, sizeof(ipi))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_bind_ipi_op(&ipi);
        if (!err && kvm_copy_to_gva(cs, arg, &ipi, sizeof(ipi))) {
            err = -EFAULT;
        }
        break;
    }
    default:
        return false;
    }

    exit->u.hcall.result = err;
    return true;
}
int kvm_xen_soft_reset(void)
{
    CPUState *cpu;
    int err;

    assert(qemu_mutex_iothread_locked());

    trace_kvm_xen_soft_reset();

    /*
     * Zero is the reset/startup state for HVM_PARAM_CALLBACK_IRQ. Strictly,
     * it maps to HVM_PARAM_CALLBACK_TYPE_GSI with GSI#0, but Xen refuses to
     * deliver to GSI#0 (the timer interrupt) and treats that as 'disabled'.
     */
    err = xen_evtchn_set_callback_param(0);
    if (err) {
        return err;
    }

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, do_vcpu_soft_reset, RUN_ON_CPU_NULL);
    }

    err = xen_overlay_map_shinfo_page(INVALID_GFN);
    if (err) {
        return err;
    }

    return 0;
}
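
/*
 * SCHEDOP_shutdown: map the Xen shutdown reason onto the corresponding
 * QEMU action (guest panic, system reset, poweroff, or Xen soft reset).
 */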
static int schedop_shutdown(CPUState *cs, uint64_t arg)
{
    struct sched_shutdown shutdown;
    int ret = 0;

    /* No need for 32/64 compat handling */
    qemu_build_assert(sizeof(shutdown) == 4);

    if (kvm_copy_from_gva(cs, arg, &shutdown, sizeof(shutdown))) {
        return -EFAULT;
    }

    switch (shutdown.reason) {
    case SHUTDOWN_crash:
        cpu_dump_state(cs, stderr, CPU_DUMP_CODE);
        qemu_system_guest_panicked(NULL);
        break;

    case SHUTDOWN_reboot:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        break;

    case SHUTDOWN_poweroff:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        break;

    case SHUTDOWN_soft_reset:
        qemu_mutex_lock_iothread();
        ret = kvm_xen_soft_reset();
        qemu_mutex_unlock_iothread();
        break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}
static bool kvm_xen_hcall_sched_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                   int cmd, uint64_t arg)
{
    CPUState *cs = CPU(cpu);
    int err = -ENOSYS;

    switch (cmd) {
    case SCHEDOP_shutdown:
        err = schedop_shutdown(cs, arg);
        break;

    case SCHEDOP_poll:
        /*
         * Linux will panic if this doesn't work. Just yield; it's not
         * worth overthinking it because with event channel handling
         * in KVM, the kernel will intercept this and it will never
         * reach QEMU anyway. The semantics of the hypercall explicitly
         * permit spurious wakeups.
         */
    case SCHEDOP_yield:
        sched_yield();
        err = 0;
        break;

    default:
        return false;
    }

    exit->u.hcall.result = err;
    return true;
}
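
/*
 * Hypercall dispatch: the hypercall number arrives in exit->u.hcall.input
 * and its arguments in exit->u.hcall.params[]. Handlers return true if
 * the call was recognised (leaving the return value in hcall.result),
 * and false to have it logged and failed with -ENOSYS.
 */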
static bool do_kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
{
    uint16_t code = exit->u.hcall.input;

    if (exit->u.hcall.cpl > 0) {
        exit->u.hcall.result = -EPERM;
        return true;
    }

    switch (code) {
    case __HYPERVISOR_sched_op:
        return kvm_xen_hcall_sched_op(exit, cpu, exit->u.hcall.params[0],
                                      exit->u.hcall.params[1]);
    case __HYPERVISOR_event_channel_op:
        return kvm_xen_hcall_evtchn_op(exit, cpu, exit->u.hcall.params[0],
                                       exit->u.hcall.params[1]);
    case __HYPERVISOR_vcpu_op:
        return kvm_xen_hcall_vcpu_op(exit, cpu,
                                     exit->u.hcall.params[0],
                                     exit->u.hcall.params[1],
                                     exit->u.hcall.params[2]);
    case __HYPERVISOR_hvm_op:
        return kvm_xen_hcall_hvm_op(exit, cpu, exit->u.hcall.params[0],
                                    exit->u.hcall.params[1]);
    case __HYPERVISOR_memory_op:
        return kvm_xen_hcall_memory_op(exit, cpu, exit->u.hcall.params[0],
                                       exit->u.hcall.params[1]);
    case __HYPERVISOR_xen_version:
        return kvm_xen_hcall_xen_version(exit, cpu, exit->u.hcall.params[0],
                                         exit->u.hcall.params[1]);
    default:
        return false;
    }
}
int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
{
    if (exit->type != KVM_EXIT_XEN_HCALL) {
        return -1;
    }

    /*
     * The kernel latches the guest 32/64 mode when the MSR is used to fill
     * the hypercall page. So if we see a hypercall in a mode that doesn't
     * match our own idea of the guest mode, fetch the kernel's idea of the
     * "long mode" to remain in sync.
     */
    if (exit->u.hcall.longmode != xen_is_long_mode()) {
        xen_sync_long_mode();
    }

    if (!do_kvm_xen_handle_exit(cpu, exit)) {
        /*
         * Some hypercalls will be deliberately "implemented" by returning
         * -ENOSYS. This case is for hypercalls which are unexpected.
         */
        exit->u.hcall.result = -ENOSYS;
        qemu_log_mask(LOG_UNIMP, "Unimplemented Xen hypercall %"
                      PRId64 " (0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 ")\n",
                      (uint64_t)exit->u.hcall.input,
                      (uint64_t)exit->u.hcall.params[0],
                      (uint64_t)exit->u.hcall.params[1],
                      (uint64_t)exit->u.hcall.params[2]);
    }

    trace_kvm_xen_hypercall(CPU(cpu)->cpu_index, exit->u.hcall.cpl,
                            exit->u.hcall.input, exit->u.hcall.params[0],
                            exit->u.hcall.params[1], exit->u.hcall.params[2],
                            exit->u.hcall.result);
    return 0;
}
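
/*
 * vmstate support: on restore, push back to the kernel all the state
 * which was set at runtime (vcpu_info, time info, runstate area, and
 * where supported the upcall vector and timer).
 */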
int kvm_put_xen_state(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t gpa;
    int ret;

    gpa = env->xen_vcpu_info_gpa;
    if (gpa == INVALID_GPA) {
        gpa = env->xen_vcpu_info_default_gpa;
    }

    if (gpa != INVALID_GPA) {
        ret = set_vcpu_info(cs, gpa);
        if (ret < 0) {
            return ret;
        }
    }

    gpa = env->xen_vcpu_time_info_gpa;
    if (gpa != INVALID_GPA) {
        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
                                    gpa);
        if (ret < 0) {
            return ret;
        }
    }

    gpa = env->xen_vcpu_runstate_gpa;
    if (gpa != INVALID_GPA) {
        ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
                                    gpa);
        if (ret < 0) {
            return ret;
        }
    }

    if (!kvm_xen_has_cap(EVTCHN_SEND)) {
        return 0;
    }

    if (env->xen_vcpu_callback_vector) {
        ret = kvm_xen_set_vcpu_callback_vector(cs);
        if (ret) {
            return ret;
        }
    }

    if (env->xen_virq[VIRQ_TIMER]) {
        ret = kvm_xen_set_vcpu_timer(cs);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
int kvm_get_xen_state(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t gpa;
    int ret;

    /*
     * The kernel does not mark vcpu_info as dirty when it delivers interrupts
     * to it. It's up to userspace to *assume* that any page shared thus is
     * always considered dirty. The shared_info page is different since it's
     * an overlay and migrated separately anyway.
     */
    gpa = env->xen_vcpu_info_gpa;
    if (gpa == INVALID_GPA) {
        gpa = env->xen_vcpu_info_default_gpa;
    }
    if (gpa != INVALID_GPA) {
        MemoryRegionSection mrs = memory_region_find(get_system_memory(),
                                                     gpa,
                                                     sizeof(struct vcpu_info));
        if (mrs.mr &&
            !int128_lt(mrs.size, int128_make64(sizeof(struct vcpu_info)))) {
            memory_region_set_dirty(mrs.mr, mrs.offset_within_region,
                                    sizeof(struct vcpu_info));
        }
    }

    if (!kvm_xen_has_cap(EVTCHN_SEND)) {
        return 0;
    }

    /*
     * If the kernel is accelerating timers, read out the current value of the
     * singleshot timer deadline.
     */
    if (env->xen_virq[VIRQ_TIMER]) {
        struct kvm_xen_vcpu_attr va = {
            .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
        };
        ret = kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_GET_ATTR, &va);
        if (ret < 0) {
            return ret;
        }
        env->xen_singleshot_timer_ns = va.u.timer.expires_ns;
    }

    return 0;
}