/*
 * VGICv3 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
/* extract @num bytes at @offset bytes offset in data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
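/*
 * For illustration (values are arbitrary): a 2-byte read at byte offset 2
 * of the 64-bit value 0x0123456789abcdef yields
 * extract_bytes(0x0123456789abcdef, 2, 2) == 0x89ab, i.e. the value is
 * shifted right by 16 bits and masked down to 16 bits.
 */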
/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}
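/*
 * For illustration (values are arbitrary): a 32-bit write of 0xdeadbeef at
 * byte offset 4 replaces only the upper word, so
 * update_64bit_reg(0x0123456789abcdef, 4, 4, 0xdeadbeef) returns
 * 0xdeadbeef89abcdef, while an 8-byte access (offset 0, len 8) rewrites
 * the whole register.
 */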
bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}
bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}
/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		break;
	case GICD_TYPER:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}
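/*
 * Illustrative example of the GICD_TYPER computation above: if the host
 * configured nr_spis == 224 (a hypothetical choice), value starts as
 * 224 + 32 == 256; (256 >> 5) - 1 == 7 goes into ITLinesNumber, from
 * which the guest derives 32 * (7 + 1) == 256 supported INTIDs.
 */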
static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GICD_TYPER:
	case GICD_IIDR:
		return;
	}
}
static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	switch (addr & 0x0c) {
	case GICD_IIDR:
		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
			return -EINVAL;
	}

	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
	return 0;
}
static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}
static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	if (!irq)
		return;

	spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}
static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
	int target_vcpu_id = vcpu->vcpu_id;
	gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
			(rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);

	if (addr == last_rdist_typer)
		value |= GICR_TYPER_LAST;
	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	return extract_bytes(value, addr & 7, len);
}
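/*
 * Illustrative GICR_TYPER layout as built above (hypothetical values):
 * for vcpu_id 3 with MPIDR Aff2.Aff1.Aff0 == 0.0.3, value is
 * (3ULL << 32) | (3 << 8): the affinity value lives in the upper word,
 * Processor_Number in bits [23:8], plus GICR_TYPER_LAST on the final
 * redistributor of the region and GICR_TYPER_PLPIS when an ITS exists.
 */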
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}
static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}
static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * pending state of interrupt is latched in pending_latch variable.
	 * Userspace will save and restore pending state and line_level
	 * separately.
	 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
	 * for handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending_latch)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len,
					 unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
			 * (level or edge) to avoid dependency that VM should
			 * restore irq config before pending info.
			 */
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			irq->pending_latch = false;
			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}
/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}
/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}
/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_nC;
	}
}
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}
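/*
 * Illustrative use of vgic_sanitise_field(): if a guest programs the
 * PROPBASER shareability field to OuterShareable, the call
 *
 *	vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
 *			    GICR_PROPBASER_SHAREABILITY_SHIFT,
 *			    vgic_sanitise_shareability);
 *
 * extracts the field, maps it to InnerShareable and merges it back,
 * leaving every other bit of reg untouched.
 */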
#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;

	return reg;
}
static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;

	return reg;
}
static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}
static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}
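/*
 * Note on the loop above: the register is updated locklessly. If another
 * writer changes dist->propbaser between the READ_ONCE() and the
 * cmpxchg64(), the compare-exchange returns the new value instead of
 * old_propbaser and the sanitised update is recomputed and retried.
 */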
static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
}
static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}
/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
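/*
 * Illustrative expansion (not literal preprocessor output): with bpi == 1,
 * REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER, ...) yields two
 * entries: a RAZ/WI one at GICD_ISENABLER covering the 32 private IRQs
 * (len == 32 / 8 == 4 bytes), and a live one at GICD_ISENABLER + 4
 * covering INTIDs 32..1023 (len == 992 / 8 == 124 bytes).
 */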
static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
		NULL, vgic_mmio_uaccess_write_v3_misc,
		16, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
		vgic_mmio_read_group, vgic_mmio_write_group, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}
/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base, sgi_base;
	int ret;

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		return 0;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set. Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		return 0;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;

	vgic_cpu->rdreg = rdreg;

	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
	sgi_base = rd_base + SZ_64K;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rdbase_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
	sgi_dev->base_addr = sgi_base;
	sgi_dev->iodev_type = IODEV_REDIST;
	sgi_dev->regions = vgic_v3_sgibase_registers;
	sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
	sgi_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
				      SZ_64K, &sgi_dev->dev);
	if (ret) {
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
					  &rd_dev->dev);
		goto out;
	}

	rdreg->free_index++;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
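/*
 * Illustrative address math for the function above (hypothetical values):
 * each vCPU consumes KVM_VGIC_V3_REDIST_SIZE == 2 * SZ_64K (one RD frame
 * plus one SGI frame), so with rdreg->base == 0x8000000 and
 * free_index == 2, rd_base is 0x8040000 and sgi_base is 0x8050000.
 */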
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev);
}
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		mutex_lock(&kvm->slots_lock);
		for (c--; c >= 0; c--) {
			vcpu = kvm_get_vcpu(kvm, c);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}
/**
 * vgic_v3_insert_redist_region - Insert a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (ie. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
					gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
	int ret;

	/* single rdist region already set? */
	if (!count && !list_empty(rd_regions))
		return -EINVAL;

	/* cross the end of memory? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);
		if (index != rdreg->index + 1)
			return -EINVAL;

		/* Cannot add an explicitly sized region after a legacy region */
		if (!rdreg->count)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
		vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
	if (ret)
		goto free;

	rdreg->base = base;
	rdreg->count = count;
	rdreg->free_index = 0;
	rdreg->index = index;

	list_add_tail(&rdreg->list, rd_regions);

	return 0;
free:
	kfree(rdreg);
	return ret;
}
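/*
 * Illustrative sequences accepted by the checks above (hypothetical
 * values): an explicit region index 0 with count == 4 (size 0x80000) may
 * be followed by index 1 at any non-overlapping base; a legacy region
 * (count == 0) must be the only one, sized from the number of vCPUs, and
 * no further region may be added behind it.
 */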
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU. Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret)
		return ret;

	return 0;
}
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rdbase_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 reg, id;

		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
	}
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}
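/*
 * Worked example (hypothetical vCPU): for MPIDR Aff2.Aff1.Aff0 == 0.1.3,
 * affinity becomes 0x0100 after masking out level 0 and level0 == 3, so
 * match_mpidr(0x0100, 0x0008, vcpu) returns 3 (bit 3 set in the mask)
 * while match_mpidr(0x0100, 0x0004, vcpu) returns -1.
 */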
/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
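/*
 * For illustration: SGI_AFFINITY_LEVEL(reg, 2) isolates the Aff2 field of
 * the SGI register (bits [39:32] per the ICC_SGI1R field definitions) and
 * shifts it to the Aff2 position of an MPIDR-style value (bits [23:16]),
 * so an Aff2 of 0x5 contributes 0x050000 to the mpidr accumulation below.
 */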
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	unsigned long flags;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * An access targeting Group0 SGIs can only generate
		 * those, while an access targeting Group1 SGIs can
		 * generate interrupts of either group.
		 */
		if (!irq->group || allow_group1) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}
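/*
 * Illustrative dispatch (hypothetical values): a guest write with IRM == 0,
 * Aff3.Aff2.Aff1 == 0.0.1, TargetList == 0b0101 and INTID == 3 makes the
 * loop above latch SGI 3 as pending on the vCPUs whose MPIDRs are 0.0.1.0
 * and 0.0.1.2 (if present); with IRM == 1 every vCPU except the sender is
 * signaled.
 */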
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rdbase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
	};

	struct vgic_io_device sgi_dev = {
		.regions = vgic_v3_sgibase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
	};

	/* SGI_base is the next 64K frame after RD_base */
	if (offset >= SZ_64K)
		return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
				    val);
	else
		return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u64 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}