/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
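/*
 * A handler mode is always one ACCESS_READ_* flag OR'ed with one
 * ACCESS_WRITE_* flag, e.g. ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT
 * for a GICD_ISENABLERn-style set-bits register (see the handlers
 * below).
 */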
static int vgic_init(struct kvm *kvm);
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
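/*
 * Worked example (illustrative): on a 64-bit BE host the two u32 halves
 * of each unsigned long are stored "high half first", so XOR-ing the
 * shared-register word index with 1 swaps the pairs (0,1), (2,3), ...
 * and a read of 32-bit register N really returns register N's bits. On
 * LE or 32-bit hosts the swizzle is 0 and the index is used unchanged.
 */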
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
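/*
 * Typical use (illustrative): ELRSR/EISR are read as a u64 and then
 * walked as a bitmap, e.g.
 *
 *	u64 elrsr = vgic_get_elrsr(vcpu);
 *	unsigned long *ptr = u64_to_bitmask(&elrsr);
 *	for_each_set_bit(lr, ptr, vgic->nr_lr)
 *		...
 *
 * which is exactly what vgic_process_maintenance() and
 * __kvm_vgic_sync_hwstate() do below.
 */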
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}

static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);

	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
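/*
 * Example (illustrative): for GICD_IPRIORITYR accesses, offset 0x10
 * addresses the priority bytes of IRQs 16-19; since those are private
 * interrupts, the result is banked by cpuid, while offset 0x20 and up
 * land in the shared part, shifted down by VGIC_NR_PRIVATE_IRQS.
 */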
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */
	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;
		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;
		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;
		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */
		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
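/*
 * Worked example (illustrative): a 2-byte read at offset 0x2 of a
 * register gives word_offset = 16 and mask = 0xffff, so
 * ACCESS_READ_VALUE returns bits [31:16] of *reg in the low half of
 * the mmio data.
 */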
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
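/*
 * Worked example (illustrative): if SPIs 32-35 target vcpus 0, 1, 0
 * and 2 respectively, vgic_get_target_reg() yields 0x04010201 (one
 * bit per byte), and writing that value back through
 * vgic_set_target_reg() picks those same vcpus out of each byte's LSB.
 */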
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
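/*
 * Example (illustrative): vgic_cfg_expand(0x0003) == 0x0000000a, i.e.
 * bits 0 and 1 become the "edge" bits 1 and 3 of the 2-bits-per-IRQ
 * hardware encoding, and vgic_cfg_compress() inverts this, so
 * compress(expand(x)) == x for any 16-bit x.
 */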
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface. It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
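/*
 * Layout example (illustrative): a read of GICD_SPENDSGIR0 returns the
 * source bitmaps of SGI0..SGI3, one byte per SGI; bit n of a byte set
 * means vcpu n has that SGI pending for this vcpu.
 */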
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	int bits_per_irq;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct mmio_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}

/*
 * Call the respective handler function for the given range.
 * We split up any 64 bit accesses into two consecutive 32 bit
 * handler calls and merge the result afterwards.
 * We do this in a little endian fashion regardless of the host's
 * or guest's endianness, because the GIC is always LE and the rest of
 * the code (vgic_reg_access) also puts it in a LE fashion already.
 * At this point we have already identified the handle function, so
 * range points to that one entry and offset is relative to this.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct mmio_range *range)
{
	u32 *data32 = (void *)mmio->data;
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Any access bigger than 4 bytes (that we currently handle in KVM)
	 * is actually 8 bytes long, caused by a 64-bit access
	 */

	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;

	mmio32.phys_addr = mmio->phys_addr + 4;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
	if (!mmio->is_write)
		data32[1] = *(u32 *)mmio32.data;

	mmio32.phys_addr = mmio->phys_addr;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);
	if (!mmio->is_write)
		data32[0] = *(u32 *)mmio32.data;

	return ret;
}
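/*
 * Worked example (illustrative): an 8-byte write at offset 0x8 of a
 * range is turned into two 32-bit handler calls, first at offset 0xc
 * with data32[1] (the upper LE word), then at offset 0x8 with
 * data32[0], and the two "updated state" results are OR'ed together.
 */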
/**
 * vgic_handle_mmio_range - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 * @ranges:	array of MMIO ranges in a given region
 * @mmio_base:	base address of that region
 *
 * returns true if the MMIO access could be performed
 */
static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_exit_mmio *mmio,
				   const struct mmio_range *ranges,
				   unsigned long mmio_base)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool updated_state;
	unsigned long offset;

	offset = mmio->phys_addr - mmio_base;
	range = find_matching_range(ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, mmio, offset, range);
	} else {
		if (!mmio->is_write)
			memset(mmio->data, 0, mmio->len);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

static inline bool is_in_range(phys_addr_t addr, unsigned long len,
			       phys_addr_t baseaddr, unsigned long size)
{
	return (addr >= baseaddr) && (addr + len <= baseaddr + size);
}
static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

	if (!is_in_range(mmio->phys_addr, mmio->len, base,
			 KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* GICv2 does not support accesses wider than 32 bits */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
 * @vcpu:      pointer to the vcpu performing the access
 * @run:       pointer to the kvm_run structure
 * @mmio:      pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 * Calls the actual handling routine for the selected VGIC model.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return false;

	/*
	 * This will currently call either vgic_v2_handle_mmio() or
	 * vgic_v3_handle_mmio(), which in turn will call
	 * vgic_handle_mmio_range() defined above.
	 */
	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}

static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
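/*
 * Encoding example (illustrative): a guest write of 0x01000005 to
 * GICD_SGIR means mode 1 ("all but self"), SGI number 5; the target
 * byte is ignored and recomputed as every online vcpu except the
 * sender.
 */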
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 * sgi_source must be zero for any non-SGI interrupts.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= dist->nr_irqs);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
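/*
 * Examples (illustrative): for an edge IRQ, a 0->1 transition with
 * nothing pending injects (1 > 0), while a second rising edge is
 * swallowed as long as the first is still pending (1 > 1 is false).
 * For a level IRQ only an actual change of line level counts, so
 * re-asserting an already-high line is a no-op.
 */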
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret ? cpuid : -EINVAL;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  activates an interrupt
 *                            false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	int ret = 0;
	int vcpu_id;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
			ret = -EBUSY;
			goto out;
		}
		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);

		if (ret)
			goto out;
	}

	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
	if (vcpu_id >= 0) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
	}

out:
	return ret;
}
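/*
 * Usage sketch (illustrative, not from the original source): an
 * emulated device asserting a level-sensitive SPI would call
 * kvm_vgic_inject_irq(kvm, 0, irq_num, true) when raising its line
 * and the same with false when lowering it; for PPIs, cpuid selects
 * the private instance of the interrupt.
 */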
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->nr_cpus = 0;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
static int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		for (i = 0; i < dist->nr_irqs; i++) {
			if (i < VGIC_NR_PPIS)
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
			if (i < VGIC_NR_PRIVATE_IRQS)
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
/**
 * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
static void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;
}

static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
	{}
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
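		/* e.g. the accepted values are 64, 96, 128, ..., 1024 */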
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}

static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	}
	return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}