2 * ITS emulation for a GICv3-based system
4 * Copyright Linaro.org 2021
7 * Shashi Mallela <shashi.mallela@linaro.org>
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10 * option) any later version. See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/intc/arm_gicv3_its_common.h"
18 #include "gicv3_internal.h"
19 #include "qom/object.h"
20 #include "qapi/error.h"
/*
 * QOM boilerplate: forward-declare the class struct and generate the
 * standard cast/check macros for TYPE_ARM_GICV3_ITS.  The instance
 * struct (GICv3ITSState) comes from arm_gicv3_its_common.h.
 */
22 typedef struct GICv3ITSClass GICv3ITSClass;
23 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
24 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
25 ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
/*
 * Class state for the ITS device.  parent_reset saves the superclass
 * reset handler so gicv3_its_reset() can chain to it (see
 * device_class_set_parent_reset() in gicv3_its_class_init()).
 */
27 struct GICv3ITSClass {
28 GICv3ITSCommonClass parent_class;
29 void (*parent_reset)(DeviceState *dev);
33 * This is an internal enum used to distinguish between LPI triggered
34 * via command queue and LPI triggered via gits_translater write.
36 typedef enum ItsCmdType {
37 NONE = 0, /* internal indication for GITS_TRANSLATER write */
/*
 * NOTE(review): the remaining enumerators are elided from this view;
 * process_cmdq() below also uses INTERRUPT, CLEAR and DISCARD values
 * of this enum -- confirm against the full file.
 */
49 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
50 * if a command parameter is not correct. These include both "stall
51 * processing of the command queue" and "ignore this command, and
52 * keep processing the queue". In our implementation we choose that
53 * memory transaction errors reading the command packet provoke a
54 * stall, but errors in parameters cause us to ignore the command
55 * and continue processing.
56 * The process_* functions which handle individual ITS commands all
57 * return an ItsCmdResult which tells process_cmdq() whether it should
58 * stall or keep going.
/* Enumerators (CMD_STALL / CMD_CONTINUE) are elided from this view. */
60 typedef enum ItsCmdResult {
/*
 * Extract the physical base address encoded in a GITS_BASER<n> value.
 * For 4K/16K pages the address is the PHYADDR field shifted left 12;
 * for 64K pages the architecture splits the address into a low field
 * (bits [47:16]) and a high field (bits [51:48]), recombined here.
 */
65 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
70 case GITS_PAGE_SIZE_4K:
71 case GITS_PAGE_SIZE_16K:
72 result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
75 case GITS_PAGE_SIZE_64K:
76 result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
77 result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
/*
 * Read the Collection Table Entry for @icid into *@cte via guest DMA.
 * Supports both a two-level ("indirect") table -- walk the level-1
 * entry, check its valid bit, then index into the level-2 page -- and
 * a flat table indexed directly by icid.
 * Returns the CTE's VALID field; *@res reports DMA transaction status.
 */
86 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
89 AddressSpace *as = &s->gicv3->dma_as;
94 uint32_t num_l2_entries;
/* Two-level walk: locate the L1 entry covering this icid */
97 l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
99 value = address_space_ldq_le(as,
101 (l2t_id * L1TABLE_ENTRY_SIZE),
102 MEMTXATTRS_UNSPECIFIED, res);
104 if (*res == MEMTX_OK) {
105 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
108 num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
/* L1 entry holds the physical address of the L2 page (bits [50:0]) */
110 l2t_addr = value & ((1ULL << 51) - 1);
112 *cte = address_space_ldq_le(as, l2t_addr +
113 ((icid % num_l2_entries) * GITS_CTE_SIZE),
114 MEMTXATTRS_UNSPECIFIED, res);
118 /* Flat level table */
119 *cte = address_space_ldq_le(as, s->ct.base_addr +
120 (icid * GITS_CTE_SIZE),
121 MEMTXATTRS_UNSPECIFIED, res);
124 return FIELD_EX64(*cte, CTE, VALID);
/*
 * Write an Interrupt Translation Entry for @eventid into the device's
 * ITT (whose base address comes from the DTE).  The ITE is stored as
 * a 12-byte record: 8-byte itel then 4-byte iteh.
 * NOTE(review): the iteh store below is at offset (eventid * 12) + 4,
 * which overlaps the upper word of the itel just written at the same
 * record.  get_ite() reads it back from the same offset, so it is
 * self-consistent, but verify against the intended layout (+8?).
 */
127 static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
130 AddressSpace *as = &s->gicv3->dma_as;
132 MemTxResult res = MEMTX_OK;
134 itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
135 itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
137 address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
138 sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
/* Only write the high word if the low-word store succeeded */
141 if (res == MEMTX_OK) {
142 address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
143 sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
144 MEMTXATTRS_UNSPECIFIED, &res);
146 if (res != MEMTX_OK) {
/*
 * Read the Interrupt Translation Entry for @eventid from the device's
 * ITT (base address taken from @dte).  On success fills in *@icid and,
 * for physical interrupts, *@pIntid.  Returns true if the ITE is a
 * valid physical-type entry; *@res reports DMA status.
 * Reads use the same 12-byte record layout/offsets as update_ite().
 */
153 static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
154 uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
156 AddressSpace *as = &s->gicv3->dma_as;
161 itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
162 itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
164 ite.itel = address_space_ldq_le(as, itt_addr +
165 (eventid * (sizeof(uint64_t) +
166 sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
169 if (*res == MEMTX_OK) {
170 ite.iteh = address_space_ldl_le(as, itt_addr +
171 (eventid * (sizeof(uint64_t) +
172 sizeof(uint32_t))) + sizeof(uint32_t),
173 MEMTXATTRS_UNSPECIFIED, res);
175 if (*res == MEMTX_OK) {
176 if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
177 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
178 if (inttype == ITE_INTTYPE_PHYSICAL) {
179 *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
180 *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
/*
 * Read the Device Table Entry for @devid via guest DMA.  Mirrors
 * get_cte(): a two-level walk when s->dt.indirect is set (L1 entry ->
 * valid bit -> L2 page), otherwise a flat table indexed by devid.
 * Returns the raw 64-bit DTE; *@res reports DMA status.
 */
189 static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
191 AddressSpace *as = &s->gicv3->dma_as;
196 uint32_t num_l2_entries;
198 if (s->dt.indirect) {
199 l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
201 value = address_space_ldq_le(as,
203 (l2t_id * L1TABLE_ENTRY_SIZE),
204 MEMTXATTRS_UNSPECIFIED, res);
206 if (*res == MEMTX_OK) {
207 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
210 num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
/* L1 entry holds the physical address of the L2 page (bits [50:0]) */
212 l2t_addr = value & ((1ULL << 51) - 1);
214 value = address_space_ldq_le(as, l2t_addr +
215 ((devid % num_l2_entries) * GITS_DTE_SIZE),
216 MEMTXATTRS_UNSPECIFIED, res);
220 /* Flat level table */
221 value = address_space_ldq_le(as, s->dt.base_addr +
222 (devid * GITS_DTE_SIZE),
223 MEMTXATTRS_UNSPECIFIED, res);
230 * This function handles the processing of following commands based on
231 * the ItsCmdType parameter passed:-
232 * 1. triggering of lpi interrupt translation via ITS INT command
233 * 2. triggering of lpi interrupt translation via gits_translater register
234 * 3. handling of ITS CLEAR command
235 * 4. handling of ITS DISCARD command
/*
 * NOTE(review): for cmd == NONE (GITS_TRANSLATER write) the caller
 * passes the requester_id in @offset and the EventID in @value (see
 * gicv3_its_translation_write()); the elided lines presumably branch
 * on cmd to pick the right source for devid/eventid -- confirm.
 */
237 static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
238 uint32_t offset, ItsCmdType cmd)
240 AddressSpace *as = &s->gicv3->dma_as;
241 uint32_t devid, eventid;
242 MemTxResult res = MEMTX_OK;
245 uint64_t num_eventids;
248 bool ite_valid = false;
250 bool cte_valid = false;
/* Command-queue path: devid is in the first doubleword of the packet */
256 devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
/* Fetch the second doubleword of the command packet */
258 offset += NUM_BYTES_IN_DW;
259 value = address_space_ldq_le(as, s->cq.base_addr + offset,
260 MEMTXATTRS_UNSPECIFIED, &res);
/* DMA error reading the command packet => stall the queue */
263 if (res != MEMTX_OK) {
267 eventid = (value & EVENTID_MASK);
269 dte = get_dte(s, devid, &res);
271 if (res != MEMTX_OK) {
274 dte_valid = FIELD_EX64(dte, DTE, VALID);
277 qemu_log_mask(LOG_GUEST_ERROR,
278 "%s: invalid command attributes: "
279 "invalid dte: %"PRIx64" for %d\n",
280 __func__, dte, devid);
284 num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
286 ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
287 if (res != MEMTX_OK) {
292 qemu_log_mask(LOG_GUEST_ERROR,
293 "%s: invalid command attributes: invalid ITE\n",
298 cte_valid = get_cte(s, icid, &cte, &res);
299 if (res != MEMTX_OK) {
303 qemu_log_mask(LOG_GUEST_ERROR,
304 "%s: invalid command attributes: "
305 "invalid cte: %"PRIx64"\n",
/*
 * NOTE(review): these devid/eventid range checks happen *after* the
 * DTE/ITE/CTE table reads above, so an out-of-range devid has already
 * been used to index guest memory -- verify ordering against the full
 * file; later upstream versions check bounds before the table walks.
 */
310 if (devid >= s->dt.num_ids) {
311 qemu_log_mask(LOG_GUEST_ERROR,
312 "%s: invalid command attributes: devid %d>=%d",
313 __func__, devid, s->dt.num_ids);
316 if (eventid >= num_eventids) {
317 qemu_log_mask(LOG_GUEST_ERROR,
318 "%s: invalid command attributes: eventid %d >= %"
320 __func__, eventid, num_eventids);
325 * Current implementation only supports rdbase == procnum
326 * Hence rdbase physical address is ignored
328 rdbase = FIELD_EX64(cte, CTE, RDBASE);
330 if (rdbase >= s->gicv3->num_cpu) {
/* CLEAR/DISCARD drop the pending LPI; INT/TRANSLATER set it pending */
334 if ((cmd == CLEAR) || (cmd == DISCARD)) {
335 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
337 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
340 if (cmd == DISCARD) {
342 /* remove mapping from interrupt translation table */
343 return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
/*
 * Handle the MAPTI and MAPI commands: map (devid, eventid) to a
 * physical LPI and collection by writing an ITE into the device's ITT.
 * @ignore_pInt selects MAPI, where pINTID is taken to equal EventID
 * (the elided lines presumably apply that -- confirm in full file).
 * Returns CMD_STALL on DMA error, CMD_CONTINUE otherwise (bad
 * parameters are logged and the command is ignored).
 */
348 static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
349 uint32_t offset, bool ignore_pInt)
351 AddressSpace *as = &s->gicv3->dma_as;
352 uint32_t devid, eventid;
354 uint64_t num_eventids;
357 MemTxResult res = MEMTX_OK;
362 devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
/* Second doubleword: EventID and (for MAPTI) pINTID */
363 offset += NUM_BYTES_IN_DW;
364 value = address_space_ldq_le(as, s->cq.base_addr + offset,
365 MEMTXATTRS_UNSPECIFIED, &res);
367 if (res != MEMTX_OK) {
371 eventid = (value & EVENTID_MASK);
376 pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
/* Third doubleword: target collection ID */
379 offset += NUM_BYTES_IN_DW;
380 value = address_space_ldq_le(as, s->cq.base_addr + offset,
381 MEMTXATTRS_UNSPECIFIED, &res);
383 if (res != MEMTX_OK) {
387 icid = value & ICID_MASK;
389 dte = get_dte(s, devid, &res);
391 if (res != MEMTX_OK) {
394 dte_valid = FIELD_EX64(dte, DTE, VALID);
395 num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
396 num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
/* Parameter sanity: ids in range, DTE mapped, pINTID a valid LPI */
398 if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
399 || !dte_valid || (eventid >= num_eventids) ||
400 (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
401 (pIntid != INTID_SPURIOUS))) {
402 qemu_log_mask(LOG_GUEST_ERROR,
403 "%s: invalid command attributes "
404 "devid %d or icid %d or eventid %d or pIntid %d or"
405 "unmapped dte %d\n", __func__, devid, icid, eventid,
408 * in this implementation, in case of error
409 * we ignore this command and move onto the next
410 * command in the queue
415 /* add ite entry to interrupt translation table */
416 ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
417 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
418 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
419 ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
420 ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
422 return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
/*
 * Write a Collection Table Entry mapping @icid to redistributor
 * @rdbase.  Handles both indirect (two-level) and flat tables, like
 * get_cte() but for the store direction.  Returns false (-> stall)
 * on DMA error.  (@valid handling is in lines elided from this view.)
 */
425 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
428 AddressSpace *as = &s->gicv3->dma_as;
433 uint32_t num_l2_entries;
435 MemTxResult res = MEMTX_OK;
442 /* add mapping entry to collection table */
443 cte = FIELD_DP64(cte, CTE, VALID, 1);
444 cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
448 * The specification defines the format of level 1 entries of a
449 * 2-level table, but the format of level 2 entries and the format
450 * of flat-mapped tables is IMPDEF.
452 if (s->ct.indirect) {
453 l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
455 value = address_space_ldq_le(as,
457 (l2t_id * L1TABLE_ENTRY_SIZE),
458 MEMTXATTRS_UNSPECIFIED, &res);
460 if (res != MEMTX_OK) {
464 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
467 num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
469 l2t_addr = value & ((1ULL << 51) - 1);
471 address_space_stq_le(as, l2t_addr +
472 ((icid % num_l2_entries) * GITS_CTE_SIZE),
473 cte, MEMTXATTRS_UNSPECIFIED, &res);
476 /* Flat level table */
477 address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
478 cte, MEMTXATTRS_UNSPECIFIED, &res);
480 if (res != MEMTX_OK) {
/*
 * Handle the MAPC command: map collection @icid to a redistributor.
 * RDbase is interpreted as a processor number (rdbase == procnum in
 * this implementation).  Bad parameters are logged and ignored;
 * a failed CTE write stalls the queue.
 */
487 static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
489 AddressSpace *as = &s->gicv3->dma_as;
493 MemTxResult res = MEMTX_OK;
/* MAPC's parameters live in the third doubleword of the packet */
496 offset += NUM_BYTES_IN_DW;
497 offset += NUM_BYTES_IN_DW;
499 value = address_space_ldq_le(as, s->cq.base_addr + offset,
500 MEMTXATTRS_UNSPECIFIED, &res);
502 if (res != MEMTX_OK) {
506 icid = value & ICID_MASK;
508 rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
509 rdbase &= RDBASE_PROCNUM_MASK;
511 valid = (value & CMD_FIELD_VALID_MASK);
513 if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
514 qemu_log_mask(LOG_GUEST_ERROR,
515 "ITS MAPC: invalid collection table attributes "
516 "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
518 * in this implementation, in case of error
519 * we ignore this command and move onto the next
520 * command in the queue
525 return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
/*
 * Write a Device Table Entry for @devid recording the ITT base address
 * and EventID size.  Same indirect/flat store logic as update_cte().
 * Returns false (-> stall) on DMA error.  (@valid handling is in lines
 * elided from this view.)
 */
528 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
529 uint8_t size, uint64_t itt_addr)
531 AddressSpace *as = &s->gicv3->dma_as;
536 uint32_t num_l2_entries;
538 MemTxResult res = MEMTX_OK;
542 /* add mapping entry to device table */
543 dte = FIELD_DP64(dte, DTE, VALID, 1);
544 dte = FIELD_DP64(dte, DTE, SIZE, size);
545 dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
552 * The specification defines the format of level 1 entries of a
553 * 2-level table, but the format of level 2 entries and the format
554 * of flat-mapped tables is IMPDEF.
556 if (s->dt.indirect) {
557 l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
559 value = address_space_ldq_le(as,
561 (l2t_id * L1TABLE_ENTRY_SIZE),
562 MEMTXATTRS_UNSPECIFIED, &res);
564 if (res != MEMTX_OK) {
568 valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
571 num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
573 l2t_addr = value & ((1ULL << 51) - 1);
575 address_space_stq_le(as, l2t_addr +
576 ((devid % num_l2_entries) * GITS_DTE_SIZE),
577 dte, MEMTXATTRS_UNSPECIFIED, &res);
580 /* Flat level table */
581 address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
582 dte, MEMTXATTRS_UNSPECIFIED, &res);
584 if (res != MEMTX_OK) {
/*
 * Handle the MAPD command: map @devid to an ITT whose base address and
 * EventID-size field come from the second and third doublewords of the
 * command packet.  Bad parameters are logged and ignored; a failed DTE
 * write stalls the queue.
 */
591 static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
594 AddressSpace *as = &s->gicv3->dma_as;
599 MemTxResult res = MEMTX_OK;
601 devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
/* Second doubleword: EventID size (number of ID bits - 1) */
603 offset += NUM_BYTES_IN_DW;
604 value = address_space_ldq_le(as, s->cq.base_addr + offset,
605 MEMTXATTRS_UNSPECIFIED, &res);
607 if (res != MEMTX_OK) {
611 size = (value & SIZE_MASK);
/* Third doubleword: ITT address (256-byte aligned) and valid bit */
613 offset += NUM_BYTES_IN_DW;
614 value = address_space_ldq_le(as, s->cq.base_addr + offset,
615 MEMTXATTRS_UNSPECIFIED, &res);
617 if (res != MEMTX_OK) {
621 itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
623 valid = (value & CMD_FIELD_VALID_MASK);
625 if ((devid >= s->dt.num_ids) ||
626 (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
627 qemu_log_mask(LOG_GUEST_ERROR,
628 "ITS MAPD: invalid device table attributes "
629 "devid %d or size %d\n", devid, size);
631 * in this implementation, in case of error
632 * we ignore this command and move onto the next
633 * command in the queue
638 return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
642 * Current implementation blocks until all
643 * commands are processed
/*
 * Drain the guest command queue: read 32-byte command packets from
 * CREADR up to CWRITER, dispatch each to the matching process_*
 * handler, and advance GITS_CREADR after each CMD_CONTINUE.  On a DMA
 * error or CMD_STALL, set GITS_CREADR.STALLED and stop.
 */
645 static void process_cmdq(GICv3ITSState *s)
647 uint32_t wr_offset = 0;
648 uint32_t rd_offset = 0;
649 uint32_t cq_offset = 0;
651 AddressSpace *as = &s->gicv3->dma_as;
652 MemTxResult res = MEMTX_OK;
/* Nothing to do unless the ITS is enabled */
656 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
660 wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
662 if (wr_offset >= s->cq.num_entries) {
663 qemu_log_mask(LOG_GUEST_ERROR,
664 "%s: invalid write offset "
665 "%d\n", __func__, wr_offset);
669 rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
671 if (rd_offset >= s->cq.num_entries) {
672 qemu_log_mask(LOG_GUEST_ERROR,
673 "%s: invalid read offset "
674 "%d\n", __func__, rd_offset);
/* Queue is empty when the read pointer catches the write pointer */
678 while (wr_offset != rd_offset) {
679 ItsCmdResult result = CMD_CONTINUE;
681 cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
682 data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
683 MEMTXATTRS_UNSPECIFIED, &res);
684 if (res != MEMTX_OK) {
685 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
686 qemu_log_mask(LOG_GUEST_ERROR,
687 "%s: could not read command at 0x%" PRIx64 "\n",
688 __func__, s->cq.base_addr + cq_offset);
/* Command opcode lives in the low byte of the first doubleword */
692 cmd = (data & CMD_MASK);
696 result = process_its_cmd(s, data, cq_offset, INTERRUPT);
699 result = process_its_cmd(s, data, cq_offset, CLEAR);
703 * Current implementation makes a blocking synchronous call
704 * for every command issued earlier, hence the internal state
705 * is already consistent by the time SYNC command is executed.
706 * Hence no further processing is required for SYNC command.
710 result = process_mapd(s, data, cq_offset);
713 result = process_mapc(s, cq_offset);
716 result = process_mapti(s, data, cq_offset, false);
719 result = process_mapti(s, data, cq_offset, true);
721 case GITS_CMD_DISCARD:
722 result = process_its_cmd(s, data, cq_offset, DISCARD);
725 case GITS_CMD_INVALL:
727 * Current implementation doesn't cache any ITS tables,
728 * but the calculated lpi priority information. We only
729 * need to trigger lpi priority re-calculation to be in
730 * sync with LPI config table or pending table changes.
732 for (i = 0; i < s->gicv3->num_cpu; i++) {
733 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
/* Advance (with wraparound) only if the command completed */
739 if (result == CMD_CONTINUE) {
741 rd_offset %= s->cq.num_entries;
742 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
745 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
746 qemu_log_mask(LOG_GUEST_ERROR,
747 "%s: 0x%x cmd processing failed, stalling\n",
755 * This function extracts the ITS Device and Collection table specific
756 * parameters (like base_addr, size etc) from GITS_BASER register.
757 * It is called during ITS enable and also during post_load migration
759 static void extract_table_params(GICv3ITSState *s)
761 uint16_t num_pages = 0;
762 uint8_t page_sz_type;
764 uint32_t page_sz = 0;
/* Walk all eight GITS_BASER<n> registers */
767 for (int i = 0; i < 8; i++) {
/* Decode the architected page-size field into a byte count */
777 page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
779 switch (page_sz_type) {
781 page_sz = GITS_PAGE_SIZE_4K;
785 page_sz = GITS_PAGE_SIZE_16K;
790 page_sz = GITS_PAGE_SIZE_64K;
794 g_assert_not_reached();
797 num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
/* Pick the id-bit width according to which table this BASER holds */
799 type = FIELD_EX64(value, GITS_BASER, TYPE);
802 case GITS_BASER_TYPE_DEVICE:
804 idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
806 case GITS_BASER_TYPE_COLLECTION:
808 if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
809 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
811 /* 16-bit CollectionId supported when CIL == 0 */
817 * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
818 * ensures we will only see type values corresponding to
819 * the values set up in gicv3_its_reset().
821 g_assert_not_reached();
/* Cache the decoded parameters into the per-table TableDesc */
824 memset(td, 0, sizeof(*td));
825 td->valid = FIELD_EX64(value, GITS_BASER, VALID);
827 * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
828 * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
829 * do not have a special case where the GITS_BASER<n>.Valid bit is 0
830 * for the register corresponding to the Collection table but we
831 * still have to process interrupts using non-memory-backed
832 * Collection table entries.)
837 td->page_sz = page_sz;
838 td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
839 td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
840 td->base_addr = baser_base_addr(value, page_sz);
842 td->num_entries = (num_pages * page_sz) / td->entry_sz;
/* Indirect: capacity = (#L1 entries) * (entries per L2 page) */
844 td->num_entries = (((num_pages * page_sz) /
845 L1TABLE_ENTRY_SIZE) *
846 (page_sz / td->entry_sz));
848 td->num_ids = 1ULL << idbits;
/*
 * Decode GITS_CBASER into the cached command-queue descriptor s->cq:
 * validity, capacity in 32-byte entries, and physical base address.
 * Called on ITS enable and after migration (post_load).
 */
852 static void extract_cmdq_params(GICv3ITSState *s)
854 uint16_t num_pages = 0;
855 uint64_t value = s->cbaser;
/* SIZE field encodes (number of 4K pages - 1) */
857 num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
859 memset(&s->cq, 0 , sizeof(s->cq));
860 s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
863 s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
864 GITS_CMDQ_ENTRY_SIZE;
865 s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
866 s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
/*
 * MMIO write handler for the ITS translation page.  A write to
 * GITS_TRANSLATER while the ITS is enabled triggers an interrupt
 * translation: the bus-master requester_id is the DeviceID and the
 * written data carries the EventID (cmd == NONE path of
 * process_its_cmd(); note devid is passed via the 'offset' argument).
 */
870 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
871 uint64_t data, unsigned size,
874 GICv3ITSState *s = (GICv3ITSState *)opaque;
879 case GITS_TRANSLATER:
880 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
881 devid = attrs.requester_id;
882 result = process_its_cmd(s, data, devid, NONE);
/*
 * 32-bit register write handler for the ITS control frame.
 * Returns false for writes to unknown/RO offsets so the caller can
 * log a guest error (the MMIO access itself still completes).
 * Several registers (CBASER, BASERn) are writable only while the ITS
 * is disabled -- an IMPDEF choice permitted by the architecture.
 */
896 static bool its_writel(GICv3ITSState *s, hwaddr offset,
897 uint64_t value, MemTxAttrs attrs)
/* GITS_CTLR: enabling latches the table/queue parameters */
904 if (value & R_GITS_CTLR_ENABLED_MASK) {
905 s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
906 extract_table_params(s);
907 extract_cmdq_params(s);
911 s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
916 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
919 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
920 s->cbaser = deposit64(s->cbaser, 0, 32, value);
/* Architected: writing CBASER resets CREADR/CWRITER to 0 */
922 s->cwriter = s->creadr;
925 case GITS_CBASER + 4:
927 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
930 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
931 s->cbaser = deposit64(s->cbaser, 32, 32, value);
933 s->cwriter = s->creadr;
/* GITS_CWRITER: advancing the write pointer kicks command processing */
937 s->cwriter = deposit64(s->cwriter, 0, 32,
938 (value & ~R_GITS_CWRITER_RETRY_MASK));
939 if (s->cwriter != s->creadr) {
943 case GITS_CWRITER + 4:
944 s->cwriter = deposit64(s->cwriter, 32, 32, value);
/* GITS_CREADR is writable only when GICD_CTLR.DS == 1 */
947 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
948 s->creadr = deposit64(s->creadr, 0, 32,
949 (value & ~R_GITS_CREADR_STALLED_MASK));
951 /* RO register, ignore the write */
952 qemu_log_mask(LOG_GUEST_ERROR,
953 "%s: invalid guest write to RO register at offset "
954 TARGET_FMT_plx "\n", __func__, offset);
957 case GITS_CREADR + 4:
958 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
959 s->creadr = deposit64(s->creadr, 32, 32, value);
961 /* RO register, ignore the write */
962 qemu_log_mask(LOG_GUEST_ERROR,
963 "%s: invalid guest write to RO register at offset "
964 TARGET_FMT_plx "\n", __func__, offset);
967 case GITS_BASER ... GITS_BASER + 0x3f:
969 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
972 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
973 index = (offset - GITS_BASER) / 8;
/* 32-bit write to one half of the 64-bit BASERn; TYPE etc. stay RO */
977 value &= ~GITS_BASER_RO_MASK;
978 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
979 s->baser[index] |= value;
981 value &= ~GITS_BASER_RO_MASK;
982 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
983 s->baser[index] |= value;
988 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
989 /* RO registers, ignore the write */
990 qemu_log_mask(LOG_GUEST_ERROR,
991 "%s: invalid guest write to RO register at offset "
992 TARGET_FMT_plx "\n", __func__, offset);
/*
 * 32-bit register read handler for the ITS control frame.  64-bit
 * registers (TYPER/CBASER/CREADR/CWRITER/BASERn) are served one half
 * at a time via extract64().  Returns false for unknown offsets.
 */
1001 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1002 uint64_t *data, MemTxAttrs attrs)
1012 *data = gicv3_iidr();
1014 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1016 *data = gicv3_idreg(offset - GITS_IDREGS);
1019 *data = extract64(s->typer, 0, 32);
1021 case GITS_TYPER + 4:
1022 *data = extract64(s->typer, 32, 32);
1025 *data = extract64(s->cbaser, 0, 32);
1027 case GITS_CBASER + 4:
1028 *data = extract64(s->cbaser, 32, 32);
1031 *data = extract64(s->creadr, 0, 32);
1033 case GITS_CREADR + 4:
1034 *data = extract64(s->creadr, 32, 32);
1037 *data = extract64(s->cwriter, 0, 32);
1039 case GITS_CWRITER + 4:
1040 *data = extract64(s->cwriter, 32, 32);
1042 case GITS_BASER ... GITS_BASER + 0x3f:
1043 index = (offset - GITS_BASER) / 8;
/* Odd word offset selects the upper half of BASERn */
1045 *data = extract64(s->baser[index], 32, 32);
1047 *data = extract64(s->baser[index], 0, 32);
/*
 * 64-bit register write handler; same register semantics as
 * its_writel() but whole-register at a time.  Returns false for
 * unknown/RO offsets so the caller logs a guest error.
 */
1057 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1058 uint64_t value, MemTxAttrs attrs)
1064 case GITS_BASER ... GITS_BASER + 0x3f:
1066 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1069 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1070 index = (offset - GITS_BASER) / 8;
/* Preserve the RO fields (e.g. TYPE), update the rest */
1071 s->baser[index] &= GITS_BASER_RO_MASK;
1072 s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1077 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1080 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
/* Architected: writing CBASER resets CREADR/CWRITER to 0 */
1083 s->cwriter = s->creadr;
/* GITS_CWRITER: advancing the write pointer kicks command processing */
1087 s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1088 if (s->cwriter != s->creadr) {
/* GITS_CREADR is writable only when GICD_CTLR.DS == 1 */
1093 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1094 s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1096 /* RO register, ignore the write */
1097 qemu_log_mask(LOG_GUEST_ERROR,
1098 "%s: invalid guest write to RO register at offset "
1099 TARGET_FMT_plx "\n", __func__, offset);
1103 /* RO registers, ignore the write */
1104 qemu_log_mask(LOG_GUEST_ERROR,
1105 "%s: invalid guest write to RO register at offset "
1106 TARGET_FMT_plx "\n", __func__, offset);
/*
 * 64-bit register read handler; serves the naturally 64-bit registers
 * (BASERn visible here; others elided from this view) in one access.
 * Returns false for unknown offsets.
 */
1115 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1116 uint64_t *data, MemTxAttrs attrs)
1125 case GITS_BASER ... GITS_BASER + 0x3f:
1126 index = (offset - GITS_BASER) / 8;
1127 *data = s->baser[index];
/*
 * Top-level MMIO read dispatcher for the ITS control frame: route by
 * access size to its_readl()/its_readll().  Reserved registers are
 * RAZ/WI, so leaf-function failures are logged but still return OK to
 * avoid a spurious guest data abort.
 */
1145 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1146 unsigned size, MemTxAttrs attrs)
1148 GICv3ITSState *s = (GICv3ITSState *)opaque;
1153 result = its_readl(s, offset, data, attrs);
1156 result = its_readll(s, offset, data, attrs);
1164 qemu_log_mask(LOG_GUEST_ERROR,
1165 "%s: invalid guest read at offset " TARGET_FMT_plx
1166 "size %u\n", __func__, offset, size);
1168 * The spec requires that reserved registers are RAZ/WI;
1169 * so use false returns from leaf functions as a way to
1170 * trigger the guest-error logging but don't return it to
1171 * the caller, or we'll cause a spurious guest data abort.
/*
 * Top-level MMIO write dispatcher: route by access size to
 * its_writel()/its_writell().  As with reads, failures from the leaf
 * handlers are logged but not propagated (registers are RAZ/WI).
 */
1178 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1179 unsigned size, MemTxAttrs attrs)
1181 GICv3ITSState *s = (GICv3ITSState *)opaque;
1186 result = its_writel(s, offset, data, attrs);
1189 result = its_writell(s, offset, data, attrs);
1197 qemu_log_mask(LOG_GUEST_ERROR,
1198 "%s: invalid guest write at offset " TARGET_FMT_plx
1199 "size %u\n", __func__, offset, size);
1201 * The spec requires that reserved registers are RAZ/WI;
1202 * so use false returns from leaf functions as a way to
1203 * trigger the guest-error logging but don't return it to
1204 * the caller, or we'll cause a spurious guest data abort.
/* MMIO ops for the ITS control frame: 4- or 8-byte accesses only. */
1210 static const MemoryRegionOps gicv3_its_control_ops = {
1211 .read_with_attrs = gicv3_its_read,
1212 .write_with_attrs = gicv3_its_write,
1213 .valid.min_access_size = 4,
1214 .valid.max_access_size = 8,
1215 .impl.min_access_size = 4,
1216 .impl.max_access_size = 8,
1217 .endianness = DEVICE_NATIVE_ENDIAN,
/* MMIO ops for the translation frame: write-only, 2- or 4-byte. */
1220 static const MemoryRegionOps gicv3_its_translation_ops = {
1221 .write_with_attrs = gicv3_its_translation_write,
1222 .valid.min_access_size = 2,
1223 .valid.max_access_size = 4,
1224 .impl.min_access_size = 2,
1225 .impl.max_access_size = 4,
1226 .endianness = DEVICE_NATIVE_ENDIAN,
/*
 * Device realize: verify every redistributor supports physical LPIs,
 * set up the two MMIO frames and the DMA address space used for all
 * guest table accesses, and advertise the ITS feature set in
 * GITS_TYPER (physical LPIs, ITT entry size, ID/Device/Collection
 * ID widths).
 */
1229 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1231 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1234 for (i = 0; i < s->gicv3->num_cpu; i++) {
1235 if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1236 error_setg(errp, "Physical LPI not supported by CPU %d", i);
1241 gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1243 address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
1244 "gicv3-its-sysmem");
1246 /* set the ITS default features supported */
1247 s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1248 s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1249 ITS_ITT_ENTRY_SIZE - 1);
1250 s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1251 s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1252 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1253 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
/*
 * Device reset: chain to the parent class reset, then set the
 * architected reset values -- GITS_CTLR.Quiescent = 1 and the
 * read-only fields of GITS_BASER0 (Device table) and GITS_BASER1
 * (Collection table): type, 64K page size, and entry size.
 */
1256 static void gicv3_its_reset(DeviceState *dev)
1258 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1259 GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1261 c->parent_reset(dev);
1263 /* Quiescent bit reset to 1 */
1264 s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1267 * setting GITS_BASER0.Type = 0b001 (Device)
1268 * GITS_BASER1.Type = 0b100 (Collection Table)
1269 * GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1270 * GITS_BASER<0,1>.Page_Size = 64KB
1271 * and default translation table entry size to 16 bytes
1273 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1274 GITS_BASER_TYPE_DEVICE);
1275 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1276 GITS_BASER_PAGESIZE_64K);
1277 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1280 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1281 GITS_BASER_TYPE_COLLECTION);
1282 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1283 GITS_BASER_PAGESIZE_64K);
1284 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
/*
 * Migration post-load hook: the cached table/command-queue descriptors
 * (s->dt, s->ct, s->cq) are derived state, so re-derive them from the
 * migrated BASER/CBASER registers if the ITS was enabled.
 */
1288 static void gicv3_its_post_load(GICv3ITSState *s)
1290 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1291 extract_table_params(s);
1292 extract_cmdq_params(s);
/* QOM property: mandatory link to the parent GICv3 device. */
1296 static Property gicv3_its_props[] = {
1297 DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1299 DEFINE_PROP_END_OF_LIST(),
/*
 * Class init: wire realize, properties, the reset handler (saving the
 * parent's into ic->parent_reset for chaining), and the migration
 * post_load hook.
 */
1302 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1304 DeviceClass *dc = DEVICE_CLASS(klass);
1305 GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1306 GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1308 dc->realize = gicv3_arm_its_realize;
1309 device_class_set_props(dc, gicv3_its_props);
1310 device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1311 icc->post_load = gicv3_its_post_load;
/* QOM type registration record for TYPE_ARM_GICV3_ITS. */
1314 static const TypeInfo gicv3_its_info = {
1315 .name = TYPE_ARM_GICV3_ITS,
1316 .parent = TYPE_ARM_GICV3_ITS_COMMON,
1317 .instance_size = sizeof(GICv3ITSState),
1318 .class_init = gicv3_its_class_init,
1319 .class_size = sizeof(GICv3ITSClass),
/* Register the type with QOM at module-init time. */
1322 static void gicv3_its_register_types(void)
1324 type_register_static(&gicv3_its_info);
1327 type_init(gicv3_its_register_types)