2 * QEMU emulation of an Intel IOMMU (VT-d)
3 * (DMA Remapping device)
5 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
6 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
25 #include "qapi/error.h"
26 #include "hw/sysbus.h"
27 #include "intel_iommu_internal.h"
28 #include "hw/pci/pci.h"
29 #include "hw/pci/pci_bus.h"
30 #include "hw/qdev-properties.h"
31 #include "hw/i386/pc.h"
32 #include "hw/i386/apic-msidef.h"
33 #include "hw/i386/x86-iommu.h"
34 #include "hw/pci-host/q35.h"
35 #include "sysemu/kvm.h"
36 #include "sysemu/dma.h"
37 #include "sysemu/sysemu.h"
38 #include "hw/i386/apic_internal.h"
39 #include "kvm/kvm_i386.h"
40 #include "migration/vmstate.h"
43 /* context entry operations */
44 #define VTD_CE_GET_RID2PASID(ce) \
45 ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK)
46 #define VTD_CE_GET_PASID_DIR_TABLE(ce) \
47 ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK)
50 #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT)
51 #define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW))
54 * PCI bus number (or SID) is not reliable since the device is usually
55 * initialized before the guest can configure the PCI bridge
56 * (SECONDARY_BUS_NUMBER).
63 static void vtd_address_space_refresh_all(IntelIOMMUState *s);
64 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n);
66 static void vtd_panic_require_caching_mode(void)
68 error_report("We need to set caching-mode=on for intel-iommu to enable "
69 "device assignment with IOMMU protection.");
73 static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
74 uint64_t wmask, uint64_t w1cmask)
76 stq_le_p(&s->csr[addr], val);
77 stq_le_p(&s->wmask[addr], wmask);
78 stq_le_p(&s->w1cmask[addr], w1cmask);
81 static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
83 stq_le_p(&s->womask[addr], mask);
86 static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
87 uint32_t wmask, uint32_t w1cmask)
89 stl_le_p(&s->csr[addr], val);
90 stl_le_p(&s->wmask[addr], wmask);
91 stl_le_p(&s->w1cmask[addr], w1cmask);
94 static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
96 stl_le_p(&s->womask[addr], mask);
99 /* "External" get/set operations */
100 static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
102 uint64_t oldval = ldq_le_p(&s->csr[addr]);
103 uint64_t wmask = ldq_le_p(&s->wmask[addr]);
104 uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
105 stq_le_p(&s->csr[addr],
106 ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
109 static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
111 uint32_t oldval = ldl_le_p(&s->csr[addr]);
112 uint32_t wmask = ldl_le_p(&s->wmask[addr]);
113 uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
114 stl_le_p(&s->csr[addr],
115 ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
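/*
 * Illustrative example (made-up values): with wmask=0xf0 and
 * w1cmask=0x01, writing val=0x81 to a register holding 0x03 yields
 * (0x03 & ~0xf0) | (0x81 & 0xf0) = 0x83, and clearing the written
 * w1c bit then leaves 0x82.
 */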
118 static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
120 uint64_t val = ldq_le_p(&s->csr[addr]);
121 uint64_t womask = ldq_le_p(&s->womask[addr]);
122 return val & ~womask;
125 static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
127 uint32_t val = ldl_le_p(&s->csr[addr]);
128 uint32_t womask = ldl_le_p(&s->womask[addr]);
129 return val & ~womask;
132 /* "Internal" get/set operations */
133 static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
135 return ldq_le_p(&s->csr[addr]);
138 static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
140 return ldl_le_p(&s->csr[addr]);
143 static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
145 stq_le_p(&s->csr[addr], val);
148 static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
149 uint32_t clear, uint32_t mask)
151 uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
152 stl_le_p(&s->csr[addr], new_val);
156 static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
157 uint64_t clear, uint64_t mask)
159 uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
160 stq_le_p(&s->csr[addr], new_val);
164 static inline void vtd_iommu_lock(IntelIOMMUState *s)
166 qemu_mutex_lock(&s->iommu_lock);
169 static inline void vtd_iommu_unlock(IntelIOMMUState *s)
171 qemu_mutex_unlock(&s->iommu_lock);
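/*
 * Cache whether the guest programmed a scalable-mode root table
 * (RTADDR.SMT set) so that later root/context table walks know which
 * entry format to parse.  Only honoured when the scalable-mode
 * capability is exposed to the guest.
 */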
174 static void vtd_update_scalable_state(IntelIOMMUState *s)
176 uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
178 if (s->scalable_mode) {
179 s->root_scalable = val & VTD_RTADDR_SMT;
183 static void vtd_update_iq_dw(IntelIOMMUState *s)
185 uint64_t val = vtd_get_quad_raw(s, DMAR_IQA_REG);
187 if (s->ecap & VTD_ECAP_SMTS &&
188 val & VTD_IQA_DW_MASK) {
195 /* Whether the address space needs to notify new mappings */
196 static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as)
198 return as->notifier_flags & IOMMU_NOTIFIER_MAP;
201 /* GHashTable functions */
202 static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
204 return *((const uint64_t *)v1) == *((const uint64_t *)v2);
207 static guint vtd_uint64_hash(gconstpointer v)
209 return (guint)*(const uint64_t *)v;
212 static gboolean vtd_as_equal(gconstpointer v1, gconstpointer v2)
214 const struct vtd_as_key *key1 = v1;
215 const struct vtd_as_key *key2 = v2;
217 return (key1->bus == key2->bus) && (key1->devfn == key2->devfn);
221 * Note that we use pointer to PCIBus as the key, so hashing/shifting
222 * based on the pointer value is intended. Note that we deal with
223 * collisions through vtd_as_equal().
225 static guint vtd_as_hash(gconstpointer v)
227 const struct vtd_as_key *key = v;
228 guint value = (guint)(uintptr_t)key->bus;
230 return (guint)(value << 8 | key->devfn);
233 static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
236 VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
237 uint16_t domain_id = *(uint16_t *)user_data;
238 return entry->domain_id == domain_id;
241 /* The shift of an addr for a certain level of paging structure */
242 static inline uint32_t vtd_slpt_level_shift(uint32_t level)
245 return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
248 static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
250 return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
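/*
 * With the usual 4KiB base page and 9 bits per level, levels 1-4 give
 * shifts of 12/21/30/39 bits and thus page sizes of
 * 4KiB/2MiB/1GiB/512GiB respectively.
 */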
253 static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
256 VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
257 VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
258 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
259 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
260 return (entry->domain_id == info->domain_id) &&
261 (((entry->gfn & info->mask) == gfn) ||
262 (entry->gfn == gfn_tlb));
265 /* Reset all the gen of VTDAddressSpace to zero and set the gen of
266 * IntelIOMMUState to 1. Must be called with IOMMU lock held.
268 static void vtd_reset_context_cache_locked(IntelIOMMUState *s)
270 VTDAddressSpace *vtd_as;
271 GHashTableIter as_it;
273 trace_vtd_context_cache_reset();
275 g_hash_table_iter_init(&as_it, s->vtd_address_spaces);
277 while (g_hash_table_iter_next(&as_it, NULL, (void **)&vtd_as)) {
278 vtd_as->context_cache_entry.context_cache_gen = 0;
280 s->context_cache_gen = 1;
283 /* Must be called with IOMMU lock held. */
284 static void vtd_reset_iotlb_locked(IntelIOMMUState *s)
287 g_hash_table_remove_all(s->iotlb);
290 static void vtd_reset_iotlb(IntelIOMMUState *s)
293 vtd_reset_iotlb_locked(s);
297 static void vtd_reset_caches(IntelIOMMUState *s)
300 vtd_reset_iotlb_locked(s);
301 vtd_reset_context_cache_locked(s);
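/*
 * IOTLB keys pack the page frame number in the low bits with the
 * source-id and paging level above them (at VTD_IOTLB_SID_SHIFT and
 * VTD_IOTLB_LVL_SHIFT), so a single hash table can hold entries of
 * all page sizes for all devices.
 */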
305 static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
308 return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
309 ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
312 static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
314 return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
317 /* Must be called with IOMMU lock held */
318 static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
321 VTDIOTLBEntry *entry;
325 for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
326 key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
328 entry = g_hash_table_lookup(s->iotlb, &key);
338 /* Must be called with IOMMU lock held */
339 static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
340 uint16_t domain_id, hwaddr addr, uint64_t slpte,
341 uint8_t access_flags, uint32_t level)
343 VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
344 uint64_t *key = g_malloc(sizeof(*key));
345 uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
347 trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
348 if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
349 trace_vtd_iotlb_reset("iotlb exceeds size limit");
350 vtd_reset_iotlb_locked(s);
354 entry->domain_id = domain_id;
355 entry->slpte = slpte;
356 entry->access_flags = access_flags;
357 entry->mask = vtd_slpt_level_page_mask(level);
358 *key = vtd_get_iotlb_key(gfn, source_id, level);
359 g_hash_table_replace(s->iotlb, key, entry);
362 /* Given the reg addr of both the message data and address, generate an
365 static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
366 hwaddr mesg_data_reg)
370 assert(mesg_data_reg < DMAR_REG_SIZE);
371 assert(mesg_addr_reg < DMAR_REG_SIZE);
373 msi.address = vtd_get_long_raw(s, mesg_addr_reg);
374 msi.data = vtd_get_long_raw(s, mesg_data_reg);
376 trace_vtd_irq_generate(msi.address, msi.data);
378 apic_get_class()->send_msi(&msi);
381 /* Generate a fault event to software via MSI if conditions are met.
382 * Notice that the value of FSTS_REG being passed to it should be the one
385 static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
387 if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
388 pre_fsts & VTD_FSTS_IQE) {
389 error_report_once("There are previous interrupt conditions "
390 "to be serviced by software, fault event "
394 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
395 if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
396 error_report_once("Interrupt Mask set, irq is not generated");
398 vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
399 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
403 /* Check if the Fault (F) field of the Fault Recording Register referenced by
406 static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
408 /* Each reg is 128-bit */
409 hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
410 addr += 8; /* Access the high 64-bit half */
412 assert(index < DMAR_FRCD_REG_NR);
414 return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
417 /* Update the PPF field of the Fault Status Register.
418 * Should be called whenever the F field changes in any of the fault recording
421 static void vtd_update_fsts_ppf(IntelIOMMUState *s)
424 uint32_t ppf_mask = 0;
426 for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
427 if (vtd_is_frcd_set(s, i)) {
428 ppf_mask = VTD_FSTS_PPF;
432 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
433 trace_vtd_fsts_ppf(!!ppf_mask);
436 static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
438 /* Each reg is 128-bit */
439 hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
440 addr += 8; /* Access the high 64-bit half */
442 assert(index < DMAR_FRCD_REG_NR);
444 vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
445 vtd_update_fsts_ppf(s);
448 /* Must not update F field now, should be done later */
449 static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
450 uint16_t source_id, hwaddr addr,
451 VTDFaultReason fault, bool is_write)
454 hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
456 assert(index < DMAR_FRCD_REG_NR);
458 lo = VTD_FRCD_FI(addr);
459 hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
463 vtd_set_quad_raw(s, frcd_reg_addr, lo);
464 vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
466 trace_vtd_frr_new(index, hi, lo);
469 /* Try to collapse multiple pending faults from the same requester */
470 static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
474 hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */
476 for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
477 frcd_reg = vtd_get_quad_raw(s, addr);
478 if ((frcd_reg & VTD_FRCD_F) &&
479 ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
482 addr += 16; /* 128-bit for each */
487 /* Log and report a DMAR (address translation) fault to software */
488 static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
489 hwaddr addr, VTDFaultReason fault,
492 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
494 assert(fault < VTD_FR_MAX);
496 trace_vtd_dmar_fault(source_id, fault, addr, is_write);
498 if (fsts_reg & VTD_FSTS_PFO) {
499 error_report_once("New fault is not recorded due to "
500 "Primary Fault Overflow");
504 if (vtd_try_collapse_fault(s, source_id)) {
505 error_report_once("New fault is not recorded due to "
506 "compression of faults");
510 if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
511 error_report_once("Next Fault Recording Reg is used, "
512 "new fault is not recorded, set PFO field");
513 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
517 vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
519 if (fsts_reg & VTD_FSTS_PPF) {
520 error_report_once("There are pending faults already, "
521 "fault event is not generated");
522 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
524 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
525 s->next_frcd_reg = 0;
528 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
529 VTD_FSTS_FRI(s->next_frcd_reg));
530 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
532 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
533 s->next_frcd_reg = 0;
535 /* This case actually causes the PPF to be Set.
536 * So generate fault event (interrupt).
538 vtd_generate_fault_event(s, fsts_reg);
542 /* Handle Invalidation Queue Errors of queued invalidation interface error
545 static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
547 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
549 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
550 vtd_generate_fault_event(s, fsts_reg);
553 /* Set the IWC field and try to generate an invalidation completion interrupt */
554 static void vtd_generate_completion_event(IntelIOMMUState *s)
556 if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
557 trace_vtd_inv_desc_wait_irq("One pending, skip current");
560 vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
561 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
562 if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
563 trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
564 "new event not generated");
567 /* Generate the interrupt event */
568 trace_vtd_inv_desc_wait_irq("Generating complete event");
569 vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
570 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
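/*
 * In scalable mode each root entry carries two context-table pointers:
 * the low 64 bits cover devfn 0-127 and the high 64 bits cover devfn
 * 128-255, which is why the present check looks at re->hi for the
 * upper half.
 */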
574 static inline bool vtd_root_entry_present(IntelIOMMUState *s,
578 if (s->root_scalable && devfn > UINT8_MAX / 2) {
579 return re->hi & VTD_ROOT_ENTRY_P;
582 return re->lo & VTD_ROOT_ENTRY_P;
585 static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
590 addr = s->root + index * sizeof(*re);
591 if (dma_memory_read(&address_space_memory, addr,
592 re, sizeof(*re), MEMTXATTRS_UNSPECIFIED)) {
594 return -VTD_FR_ROOT_TABLE_INV;
596 re->lo = le64_to_cpu(re->lo);
597 re->hi = le64_to_cpu(re->hi);
601 static inline bool vtd_ce_present(VTDContextEntry *context)
603 return context->lo & VTD_CONTEXT_ENTRY_P;
606 static int vtd_get_context_entry_from_root(IntelIOMMUState *s,
611 dma_addr_t addr, ce_size;
613 /* we have checked that root entry is present */
614 ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE :
615 VTD_CTX_ENTRY_LEGACY_SIZE;
617 if (s->root_scalable && index > UINT8_MAX / 2) {
618 index = index & (~VTD_DEVFN_CHECK_MASK);
619 addr = re->hi & VTD_ROOT_ENTRY_CTP;
621 addr = re->lo & VTD_ROOT_ENTRY_CTP;
624 addr = addr + index * ce_size;
625 if (dma_memory_read(&address_space_memory, addr,
626 ce, ce_size, MEMTXATTRS_UNSPECIFIED)) {
627 return -VTD_FR_CONTEXT_TABLE_INV;
630 ce->lo = le64_to_cpu(ce->lo);
631 ce->hi = le64_to_cpu(ce->hi);
632 if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) {
633 ce->val[2] = le64_to_cpu(ce->val[2]);
634 ce->val[3] = le64_to_cpu(ce->val[3]);
639 static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
641 return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
644 static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw)
646 return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw);
649 /* Whether the pte indicates the address of the page frame */
650 static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
652 return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
655 /* Get the content of a spte located in @base_addr[@index] */
656 static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
660 assert(index < VTD_SL_PT_ENTRY_NR);
662 if (dma_memory_read(&address_space_memory,
663 base_addr + index * sizeof(slpte),
664 &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) {
665 slpte = (uint64_t)-1;
668 slpte = le64_to_cpu(slpte);
672 /* Given an iova and the level of paging structure, return the offset
675 static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
677 return (iova >> vtd_slpt_level_shift(level)) &
678 ((1ULL << VTD_SL_LEVEL_BITS) - 1);
681 /* Check Capability Register to see if the @level of page-table is supported */
682 static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
684 return VTD_CAP_SAGAW_MASK & s->cap &
685 (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
688 /* Return true if check passed, otherwise false */
689 static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
692 switch (VTD_PE_GET_TYPE(pe)) {
693 case VTD_SM_PASID_ENTRY_FLT:
694 case VTD_SM_PASID_ENTRY_SLT:
695 case VTD_SM_PASID_ENTRY_NESTED:
697 case VTD_SM_PASID_ENTRY_PT:
698 if (!x86_iommu->pt_supported) {
709 static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire)
711 return pdire->val & 1;
715 * Callers of this function should check the present bit if they want
716 * to use the pdir entry for anything beyond the fpd bit check.
718 static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
720 VTDPASIDDirEntry *pdire)
723 dma_addr_t addr, entry_size;
725 index = VTD_PASID_DIR_INDEX(pasid);
726 entry_size = VTD_PASID_DIR_ENTRY_SIZE;
727 addr = pasid_dir_base + index * entry_size;
728 if (dma_memory_read(&address_space_memory, addr,
729 pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) {
730 return -VTD_FR_PASID_TABLE_INV;
736 static inline bool vtd_pe_present(VTDPASIDEntry *pe)
738 return pe->val[0] & VTD_PASID_ENTRY_P;
741 static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
747 dma_addr_t entry_size;
748 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
750 index = VTD_PASID_TABLE_INDEX(pasid);
751 entry_size = VTD_PASID_ENTRY_SIZE;
752 addr = addr + index * entry_size;
753 if (dma_memory_read(&address_space_memory, addr,
754 pe, entry_size, MEMTXATTRS_UNSPECIFIED)) {
755 return -VTD_FR_PASID_TABLE_INV;
758 /* Do translation type check */
759 if (!vtd_pe_type_check(x86_iommu, pe)) {
760 return -VTD_FR_PASID_TABLE_INV;
763 if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) {
764 return -VTD_FR_PASID_TABLE_INV;
771 * Callers of this function should check the present bit if they want
772 * to use the pasid entry for anything beyond the fpd bit check.
774 static int vtd_get_pe_from_pdire(IntelIOMMUState *s,
776 VTDPASIDDirEntry *pdire,
779 dma_addr_t addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK;
781 return vtd_get_pe_in_pasid_leaf_table(s, pasid, addr, pe);
785 * This function fetches the pasid entry for a given pasid from the
786 * specified pasid table (directory plus leaf table).
787 * Sanity checks are done so that only a present pasid entry is
788 * returned to the caller.
790 static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s,
791 dma_addr_t pasid_dir_base,
796 VTDPASIDDirEntry pdire;
798 ret = vtd_get_pdire_from_pdir_table(pasid_dir_base,
804 if (!vtd_pdire_present(&pdire)) {
805 return -VTD_FR_PASID_TABLE_INV;
808 ret = vtd_get_pe_from_pdire(s, pasid, &pdire, pe);
813 if (!vtd_pe_present(pe)) {
814 return -VTD_FR_PASID_TABLE_INV;
820 static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s,
825 dma_addr_t pasid_dir_base;
828 pasid = VTD_CE_GET_RID2PASID(ce);
829 pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);
830 ret = vtd_get_pe_from_pasid_table(s, pasid_dir_base, pasid, pe);
835 static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s,
841 dma_addr_t pasid_dir_base;
842 VTDPASIDDirEntry pdire;
845 pasid = VTD_CE_GET_RID2PASID(ce);
846 pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce);
849 * No present bit check since fpd is meaningful even
850 * if the present bit is clear.
852 ret = vtd_get_pdire_from_pdir_table(pasid_dir_base, pasid, &pdire);
857 if (pdire.val & VTD_PASID_DIR_FPD) {
862 if (!vtd_pdire_present(&pdire)) {
863 return -VTD_FR_PASID_TABLE_INV;
867 * No present bit check since fpd is meaningful even
868 * if the present bit is clear.
870 ret = vtd_get_pe_from_pdire(s, pasid, &pdire, &pe);
875 if (pe.val[0] & VTD_PASID_ENTRY_FPD) {
882 /* Get the page-table level that hardware should use for the second-level
883 * page-table walk from the Address Width field of context-entry.
885 static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
887 return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
890 static uint32_t vtd_get_iova_level(IntelIOMMUState *s,
895 if (s->root_scalable) {
896 vtd_ce_get_rid2pasid_entry(s, ce, &pe);
897 return VTD_PE_GET_LEVEL(&pe);
900 return vtd_ce_get_level(ce);
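/*
 * The context-entry AW field encodes the adjusted guest address width
 * as 30 + 9 * AW bits: AW=1 selects a 39-bit/3-level second-level
 * table and AW=2 a 48-bit/4-level one, matching the level returned
 * above.
 */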
903 static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
905 return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
908 static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s,
913 if (s->root_scalable) {
914 vtd_ce_get_rid2pasid_entry(s, ce, &pe);
915 return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9;
918 return vtd_ce_get_agaw(ce);
921 static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
923 return ce->lo & VTD_CONTEXT_ENTRY_TT;
926 /* Only for Legacy Mode. Return true if check passed, otherwise false */
927 static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
930 switch (vtd_ce_get_type(ce)) {
931 case VTD_CONTEXT_TT_MULTI_LEVEL:
932 /* Always supported */
934 case VTD_CONTEXT_TT_DEV_IOTLB:
935 if (!x86_iommu->dt_supported) {
936 error_report_once("%s: DT specified but not supported", __func__);
940 case VTD_CONTEXT_TT_PASS_THROUGH:
941 if (!x86_iommu->pt_supported) {
942 error_report_once("%s: PT specified but not supported", __func__);
948 error_report_once("%s: unknown ce type: %"PRIu32, __func__,
949 vtd_ce_get_type(ce));
955 static inline uint64_t vtd_iova_limit(IntelIOMMUState *s,
956 VTDContextEntry *ce, uint8_t aw)
958 uint32_t ce_agaw = vtd_get_iova_agaw(s, ce);
959 return 1ULL << MIN(ce_agaw, aw);
962 /* Return true if IOVA passes range check, otherwise false. */
963 static inline bool vtd_iova_range_check(IntelIOMMUState *s,
964 uint64_t iova, VTDContextEntry *ce,
968 * Check if @iova is above 2^X-1, where X is the minimum of MGAW
969 * in CAP_REG and AW in context-entry.
971 return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1));
974 static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s,
979 if (s->root_scalable) {
980 vtd_ce_get_rid2pasid_entry(s, ce, &pe);
981 return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR;
984 return vtd_ce_get_slpt_base(ce);
988 * Rsvd field masks for spte:
989 * vtd_spte_rsvd 4k pages
990 * vtd_spte_rsvd_large large pages
992 static uint64_t vtd_spte_rsvd[5];
993 static uint64_t vtd_spte_rsvd_large[5];
995 static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
997 uint64_t rsvd_mask = vtd_spte_rsvd[level];
999 if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) &&
1000 (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) {
1002 rsvd_mask = vtd_spte_rsvd_large[level];
1005 return slpte & rsvd_mask;
1008 /* Given the @iova, get relevant @slptep. @slpte_level will be the last level
1009 * of the translation, which can be used to decide the size of a large page.
1011 static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
1012 uint64_t iova, bool is_write,
1013 uint64_t *slptep, uint32_t *slpte_level,
1014 bool *reads, bool *writes, uint8_t aw_bits)
1016 dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce);
1017 uint32_t level = vtd_get_iova_level(s, ce);
1020 uint64_t access_right_check;
1021 uint64_t xlat, size;
1023 if (!vtd_iova_range_check(s, iova, ce, aw_bits)) {
1024 error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")",
1026 return -VTD_FR_ADDR_BEYOND_MGAW;
1029 /* FIXME: what is the Atomics request here? */
1030 access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
1033 offset = vtd_iova_level_offset(iova, level);
1034 slpte = vtd_get_slpte(addr, offset);
1036 if (slpte == (uint64_t)-1) {
1037 error_report_once("%s: detected read error on DMAR slpte "
1038 "(iova=0x%" PRIx64 ")", __func__, iova);
1039 if (level == vtd_get_iova_level(s, ce)) {
1040 /* Invalid programming of context-entry */
1041 return -VTD_FR_CONTEXT_ENTRY_INV;
1043 return -VTD_FR_PAGING_ENTRY_INV;
1046 *reads = (*reads) && (slpte & VTD_SL_R);
1047 *writes = (*writes) && (slpte & VTD_SL_W);
1048 if (!(slpte & access_right_check)) {
1049 error_report_once("%s: detected slpte permission error "
1050 "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
1051 "slpte=0x%" PRIx64 ", write=%d)", __func__,
1052 iova, level, slpte, is_write);
1053 return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
1055 if (vtd_slpte_nonzero_rsvd(slpte, level)) {
1056 error_report_once("%s: detected splte reserve non-zero "
1057 "iova=0x%" PRIx64 ", level=0x%" PRIx32
1058 "slpte=0x%" PRIx64 ")", __func__, iova,
1060 return -VTD_FR_PAGING_ENTRY_RSVD;
1063 if (vtd_is_last_slpte(slpte, level)) {
1065 *slpte_level = level;
1068 addr = vtd_get_slpte_addr(slpte, aw_bits);
1072 xlat = vtd_get_slpte_addr(*slptep, aw_bits);
1073 size = ~vtd_slpt_level_page_mask(level) + 1;
1076 * From VT-d spec 3.14: Untranslated requests and translation
1077 * requests that result in an address in the interrupt range will be
1078 * blocked with condition code LGN.4 or SGN.8.
1080 if ((xlat > VTD_INTERRUPT_ADDR_LAST ||
1081 xlat + size - 1 < VTD_INTERRUPT_ADDR_FIRST)) {
1084 error_report_once("%s: xlat address is in interrupt range "
1085 "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
1086 "slpte=0x%" PRIx64 ", write=%d, "
1087 "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ")",
1088 __func__, iova, level, slpte, is_write,
1090 return s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR :
1091 -VTD_FR_INTERRUPT_ADDR;
1095 typedef int (*vtd_page_walk_hook)(IOMMUTLBEvent *event, void *private);
1098 * Constant information used during page walking
1100 * @hook_fn: hook function to be called when a page is detected
1101 * @private: private data to be passed into hook func
1102 * @notify_unmap: whether we should notify invalid entries
1103 * @as: VT-d address space of the device
1104 * @aw: maximum address width
1105 * @domain: domain ID of the page walk
1108 VTDAddressSpace *as;
1109 vtd_page_walk_hook hook_fn;
1114 } vtd_page_walk_info;
1116 static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info)
1118 VTDAddressSpace *as = info->as;
1119 vtd_page_walk_hook hook_fn = info->hook_fn;
1120 void *private = info->private;
1121 IOMMUTLBEntry *entry = &event->entry;
1123 .iova = entry->iova,
1124 .size = entry->addr_mask,
1125 .translated_addr = entry->translated_addr,
1126 .perm = entry->perm,
1128 const DMAMap *mapped = iova_tree_find(as->iova_tree, &target);
1130 if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) {
1131 trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
1137 /* Update local IOVA mapped ranges */
1138 if (event->type == IOMMU_NOTIFIER_MAP) {
1140 /* If it's exactly the same translation, skip */
1141 if (!memcmp(mapped, &target, sizeof(target))) {
1142 trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask,
1143 entry->translated_addr);
1147 * Translation changed. Normally this should not
1148 * happen, but it can happen with buggy guest
1149 * OSes. Note that there will be a small window where
1150 * we don't have a mapping at all. But that's the best
1151 * effort we can do. The ideal way to emulate this is
1152 * atomically modify the PTE to follow what has
1153 * changed, but we can't. One example is that vfio
1154 * driver only has VFIO_IOMMU_[UN]MAP_DMA but no
1155 * interface to modify a mapping (meanwhile it seems
1156 * meaningless to even provide one). Anyway, let's
1157 * mark this as a TODO in case one day we'll have
1158 * a better solution.
1160 IOMMUAccessFlags cache_perm = entry->perm;
1163 /* Emulate an UNMAP */
1164 event->type = IOMMU_NOTIFIER_UNMAP;
1165 entry->perm = IOMMU_NONE;
1166 trace_vtd_page_walk_one(info->domain_id,
1168 entry->translated_addr,
1171 ret = hook_fn(event, private);
1175 /* Drop any existing mapping */
1176 iova_tree_remove(as->iova_tree, target);
1177 /* Recover the correct type */
1178 event->type = IOMMU_NOTIFIER_MAP;
1179 entry->perm = cache_perm;
1182 iova_tree_insert(as->iova_tree, &target);
1185 /* Skip since we didn't map this range at all */
1186 trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
1189 iova_tree_remove(as->iova_tree, target);
1192 trace_vtd_page_walk_one(info->domain_id, entry->iova,
1193 entry->translated_addr, entry->addr_mask,
1195 return hook_fn(event, private);
1199 * vtd_page_walk_level - walk over specific level for IOVA range
1201 * @addr: base GPA addr to start the walk
1202 * @start: IOVA range start address
1203 * @end: IOVA range end address (start <= addr < end)
1204 * @read: whether parent level has read permission
1205 * @write: whether parent level has write permission
1206 * @info: constant information for the page walk
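* Walks the IOVA range [start, end) at @level, recursing into present
* non-leaf entries and emitting MAP/UNMAP events through the hook for
* leaf pages and invalid ranges.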
1208 static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
1209 uint64_t end, uint32_t level, bool read,
1210 bool write, vtd_page_walk_info *info)
1212 bool read_cur, write_cur, entry_valid;
1215 uint64_t subpage_size, subpage_mask;
1216 IOMMUTLBEvent event;
1217 uint64_t iova = start;
1221 trace_vtd_page_walk_level(addr, level, start, end);
1223 subpage_size = 1ULL << vtd_slpt_level_shift(level);
1224 subpage_mask = vtd_slpt_level_page_mask(level);
1226 while (iova < end) {
1227 iova_next = (iova & subpage_mask) + subpage_size;
1229 offset = vtd_iova_level_offset(iova, level);
1230 slpte = vtd_get_slpte(addr, offset);
1232 if (slpte == (uint64_t)-1) {
1233 trace_vtd_page_walk_skip_read(iova, iova_next);
1237 if (vtd_slpte_nonzero_rsvd(slpte, level)) {
1238 trace_vtd_page_walk_skip_reserve(iova, iova_next);
1242 /* Permissions are stacked with parents' */
1243 read_cur = read && (slpte & VTD_SL_R);
1244 write_cur = write && (slpte & VTD_SL_W);
1247 * As long as we have either read or write permission, this is a
1248 * valid entry. The rule works for both page entries and page
1251 entry_valid = read_cur | write_cur;
1253 if (!vtd_is_last_slpte(slpte, level) && entry_valid) {
1255 * This is a valid PDE (or even bigger than PDE). We need
1256 * to walk one further level.
1258 ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
1259 iova, MIN(iova_next, end), level - 1,
1260 read_cur, write_cur, info);
1263 * This means we are either:
1265 * (1) the real page entry (either 4K page, or huge page)
1266 * (2) the whole range is invalid
1268 * In either case, we send an IOTLB notification down.
1270 event.entry.target_as = &address_space_memory;
1271 event.entry.iova = iova & subpage_mask;
1272 event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
1273 event.entry.addr_mask = ~subpage_mask;
1274 /* NOTE: this is only meaningful if entry_valid == true */
1275 event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
1276 event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP :
1277 IOMMU_NOTIFIER_UNMAP;
1278 ret = vtd_page_walk_one(&event, info);
1293 * vtd_page_walk - walk specific IOVA range, and call the hook
1295 * @s: intel iommu state
1296 * @ce: context entry to walk upon
1297 * @start: IOVA address to start the walk
1298 * @end: IOVA range end address (start <= addr < end)
1299 * @info: page walking information struct
1301 static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce,
1302 uint64_t start, uint64_t end,
1303 vtd_page_walk_info *info)
1305 dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce);
1306 uint32_t level = vtd_get_iova_level(s, ce);
1308 if (!vtd_iova_range_check(s, start, ce, info->aw)) {
1309 return -VTD_FR_ADDR_BEYOND_MGAW;
1312 if (!vtd_iova_range_check(s, end, ce, info->aw)) {
1313 /* Fix end so that it reaches the maximum */
1314 end = vtd_iova_limit(s, ce, info->aw);
1317 return vtd_page_walk_level(addr, start, end, level, true, true, info);
1320 static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s,
1323 /* Legacy Mode reserved bits check */
1324 if (!s->root_scalable &&
1325 (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits))))
1328 /* Scalable Mode reserved bits check */
1329 if (s->root_scalable &&
1330 ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) ||
1331 (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits))))
1337 error_report_once("%s: invalid root entry: hi=0x%"PRIx64
1339 __func__, re->hi, re->lo);
1340 return -VTD_FR_ROOT_ENTRY_RSVD;
1343 static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s,
1344 VTDContextEntry *ce)
1346 if (!s->root_scalable &&
1347 (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI ||
1348 ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) {
1349 error_report_once("%s: invalid context entry: hi=%"PRIx64
1350 ", lo=%"PRIx64" (reserved nonzero)",
1351 __func__, ce->hi, ce->lo);
1352 return -VTD_FR_CONTEXT_ENTRY_RSVD;
1355 if (s->root_scalable &&
1356 (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) ||
1357 ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 ||
1360 error_report_once("%s: invalid context entry: val[3]=%"PRIx64
1363 ", val[0]=%"PRIx64" (reserved nonzero)",
1364 __func__, ce->val[3], ce->val[2],
1365 ce->val[1], ce->val[0]);
1366 return -VTD_FR_CONTEXT_ENTRY_RSVD;
1372 static int vtd_ce_rid2pasid_check(IntelIOMMUState *s,
1373 VTDContextEntry *ce)
1378 * In Scalable Mode, make sure a present context entry
1379 * has a valid rid2pasid setting, i.e. a valid
1380 * rid2pasid field and a corresponding present pasid entry
1382 return vtd_ce_get_rid2pasid_entry(s, ce, &pe);
1385 /* Map a device to its corresponding domain (context-entry) */
1386 static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
1387 uint8_t devfn, VTDContextEntry *ce)
1391 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
1393 ret_fr = vtd_get_root_entry(s, bus_num, &re);
1398 if (!vtd_root_entry_present(s, &re, devfn)) {
1399 /* Not an error - it's okay if we don't have a root entry. */
1400 trace_vtd_re_not_present(bus_num);
1401 return -VTD_FR_ROOT_ENTRY_P;
1404 ret_fr = vtd_root_entry_rsvd_bits_check(s, &re);
1409 ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce);
1414 if (!vtd_ce_present(ce)) {
1415 /* Not an error - it's okay if we don't have a context entry. */
1416 trace_vtd_ce_not_present(bus_num, devfn);
1417 return -VTD_FR_CONTEXT_ENTRY_P;
1420 ret_fr = vtd_context_entry_rsvd_bits_check(s, ce);
1425 /* Check if the programming of context-entry is valid */
1426 if (!s->root_scalable &&
1427 !vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
1428 error_report_once("%s: invalid context entry: hi=%"PRIx64
1429 ", lo=%"PRIx64" (level %d not supported)",
1430 __func__, ce->hi, ce->lo,
1431 vtd_ce_get_level(ce));
1432 return -VTD_FR_CONTEXT_ENTRY_INV;
1435 if (!s->root_scalable) {
1436 /* Do translation type check */
1437 if (!vtd_ce_type_check(x86_iommu, ce)) {
1438 /* Errors dumped in vtd_ce_type_check() */
1439 return -VTD_FR_CONTEXT_ENTRY_INV;
1443 * Check if the programming of context-entry.rid2pasid
1444 * and the corresponding pasid setting is valid; this
1445 * avoids having to check the pasid entry fetch result in
1446 * future helper function calls.
1448 ret_fr = vtd_ce_rid2pasid_check(s, ce);
1457 static int vtd_sync_shadow_page_hook(IOMMUTLBEvent *event,
1460 memory_region_notify_iommu(private, 0, *event);
1464 static uint16_t vtd_get_domain_id(IntelIOMMUState *s,
1465 VTDContextEntry *ce)
1469 if (s->root_scalable) {
1470 vtd_ce_get_rid2pasid_entry(s, ce, &pe);
1471 return VTD_SM_PASID_ENTRY_DID(pe.val[1]);
1474 return VTD_CONTEXT_ENTRY_DID(ce->hi);
1477 static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
1478 VTDContextEntry *ce,
1479 hwaddr addr, hwaddr size)
1481 IntelIOMMUState *s = vtd_as->iommu_state;
1482 vtd_page_walk_info info = {
1483 .hook_fn = vtd_sync_shadow_page_hook,
1484 .private = (void *)&vtd_as->iommu,
1485 .notify_unmap = true,
1488 .domain_id = vtd_get_domain_id(s, ce),
1491 return vtd_page_walk(s, ce, addr, addr + size, &info);
1494 static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
1500 if (!(vtd_as->iommu.iommu_notify_flags & IOMMU_NOTIFIER_IOTLB_EVENTS)) {
1504 ret = vtd_dev_to_context_entry(vtd_as->iommu_state,
1505 pci_bus_num(vtd_as->bus),
1506 vtd_as->devfn, &ce);
1508 if (ret == -VTD_FR_CONTEXT_ENTRY_P) {
1510 * It's a valid scenario to have a context entry that is
1511 * not present. For example, when a device is removed
1512 * from an existing domain then the context entry will be
1513 * zeroed by the guest before it is put into another
1514 * domain. When this happens, instead of synchronizing
1515 * the shadow pages we should invalidate all existing
1516 * mappings and notify the backends.
1518 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
1519 vtd_address_space_unmap(vtd_as, n);
1526 return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX);
1530 * Check if a specific device is configured to bypass address
1531 * translation for DMA requests. In Scalable Mode, whether 1st-level
1532 * or 2nd-level translation is bypassed depends
1535 static bool vtd_dev_pt_enabled(IntelIOMMUState *s, VTDContextEntry *ce)
1540 if (s->root_scalable) {
1541 ret = vtd_ce_get_rid2pasid_entry(s, ce, &pe);
1544 * This error is guest triggerable. We should assume PT is
1545 * not enabled for safety.
1549 return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
1552 return (vtd_ce_get_type(ce) == VTD_CONTEXT_TT_PASS_THROUGH);
1556 static bool vtd_as_pt_enabled(VTDAddressSpace *as)
1563 s = as->iommu_state;
1564 if (vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn,
1567 * Possibly failed to parse the context entry for some reason
1568 * (e.g., during init, or any guest configuration errors on
1569 * context entries). We should assume PT is not enabled for
1575 return vtd_dev_pt_enabled(s, &ce);
1578 /* Return whether the device is using IOMMU translation. */
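/*
 * Each VTDAddressSpace overlays two regions: "nodmar", which passes
 * DMA straight through to system memory, and the IOMMU region, which
 * applies DMAR translation.  Switching between the two is done by
 * enabling one region and disabling the other.
 */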
1579 static bool vtd_switch_address_space(VTDAddressSpace *as)
1582 /* Whether we need to take the BQL on our own */
1583 bool take_bql = !qemu_mutex_iothread_locked();
1587 use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as);
1589 trace_vtd_switch_address_space(pci_bus_num(as->bus),
1590 VTD_PCI_SLOT(as->devfn),
1591 VTD_PCI_FUNC(as->devfn),
1595 * It's possible that we reach here without BQL, e.g., when called
1596 * from vtd_pt_enable_fast_path(). However the memory APIs need
1597 * it. We'd better make sure we already hold it, or take it now.
1600 qemu_mutex_lock_iothread();
1603 /* Turn off first then on the other */
1605 memory_region_set_enabled(&as->nodmar, false);
1606 memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
1608 memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
1609 memory_region_set_enabled(&as->nodmar, true);
1613 qemu_mutex_unlock_iothread();
1619 static void vtd_switch_address_space_all(IntelIOMMUState *s)
1621 VTDAddressSpace *vtd_as;
1622 GHashTableIter iter;
1624 g_hash_table_iter_init(&iter, s->vtd_address_spaces);
1625 while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_as)) {
1626 vtd_switch_address_space(vtd_as);
1630 static const bool vtd_qualified_faults[] = {
1631 [VTD_FR_RESERVED] = false,
1632 [VTD_FR_ROOT_ENTRY_P] = false,
1633 [VTD_FR_CONTEXT_ENTRY_P] = true,
1634 [VTD_FR_CONTEXT_ENTRY_INV] = true,
1635 [VTD_FR_ADDR_BEYOND_MGAW] = true,
1636 [VTD_FR_WRITE] = true,
1637 [VTD_FR_READ] = true,
1638 [VTD_FR_PAGING_ENTRY_INV] = true,
1639 [VTD_FR_ROOT_TABLE_INV] = false,
1640 [VTD_FR_CONTEXT_TABLE_INV] = false,
1641 [VTD_FR_INTERRUPT_ADDR] = true,
1642 [VTD_FR_ROOT_ENTRY_RSVD] = false,
1643 [VTD_FR_PAGING_ENTRY_RSVD] = true,
1644 [VTD_FR_CONTEXT_ENTRY_TT] = true,
1645 [VTD_FR_PASID_TABLE_INV] = false,
1646 [VTD_FR_SM_INTERRUPT_ADDR] = true,
1647 [VTD_FR_MAX] = false,
1650 /* To see if a fault condition is "qualified", which is reported to software
1651 * only if the FPD field in the context-entry used to process the faulting
1654 static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
1656 return vtd_qualified_faults[fault];
1659 static inline bool vtd_is_interrupt_addr(hwaddr addr)
1661 return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
1664 static gboolean vtd_find_as_by_sid(gpointer key, gpointer value,
1667 struct vtd_as_key *as_key = (struct vtd_as_key *)key;
1668 uint16_t target_sid = *(uint16_t *)user_data;
1669 uint16_t sid = PCI_BUILD_BDF(pci_bus_num(as_key->bus), as_key->devfn);
1670 return sid == target_sid;
1673 static VTDAddressSpace *vtd_get_as_by_sid(IntelIOMMUState *s, uint16_t sid)
1675 uint8_t bus_num = PCI_BUS_NUM(sid);
1676 VTDAddressSpace *vtd_as = s->vtd_as_cache[bus_num];
1679 (sid == PCI_BUILD_BDF(pci_bus_num(vtd_as->bus), vtd_as->devfn))) {
1683 vtd_as = g_hash_table_find(s->vtd_address_spaces, vtd_find_as_by_sid, &sid);
1684 s->vtd_as_cache[bus_num] = vtd_as;
1689 static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
1691 VTDAddressSpace *vtd_as;
1692 bool success = false;
1694 vtd_as = vtd_get_as_by_sid(s, source_id);
1699 if (vtd_switch_address_space(vtd_as) == false) {
1700 /* We switched off IOMMU region successfully. */
1705 trace_vtd_pt_enable_fast_path(source_id, success);
1708 static void vtd_report_fault(IntelIOMMUState *s,
1709 int err, bool is_fpd_set,
1714 if (is_fpd_set && vtd_is_qualified_fault(err)) {
1715 trace_vtd_fault_disabled();
1717 vtd_report_dmar_fault(s, source_id, addr, err, is_write);
1721 /* Map dev to context-entry then do a paging-structures walk to do an iommu
1724 * Called from RCU critical section.
1726 * @bus_num: The bus number
1727 * @devfn: The devfn, which combines the device and function numbers
1728 * @is_write: The access is a write operation
1729 * @entry: IOMMUTLBEntry that contains the addr to be translated and the result
1731 * Returns true if translation is successful, otherwise false.
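* The lookup order is: the IOTLB cache first, then the cached context
* entry, then a fresh context-entry fetch followed by a second-level
* page-table walk whose result is inserted back into the IOTLB.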
1733 static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
1734 uint8_t devfn, hwaddr addr, bool is_write,
1735 IOMMUTLBEntry *entry)
1737 IntelIOMMUState *s = vtd_as->iommu_state;
1739 uint8_t bus_num = pci_bus_num(bus);
1740 VTDContextCacheEntry *cc_entry;
1741 uint64_t slpte, page_mask;
1743 uint16_t source_id = PCI_BUILD_BDF(bus_num, devfn);
1745 bool is_fpd_set = false;
1748 uint8_t access_flags;
1749 VTDIOTLBEntry *iotlb_entry;
1752 * We have a standalone memory region for interrupt addresses; we
1753 * should never receive translation requests in this region.
1755 assert(!vtd_is_interrupt_addr(addr));
1759 cc_entry = &vtd_as->context_cache_entry;
1761 /* Try to fetch slpte from IOTLB */
1762 iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
1764 trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
1765 iotlb_entry->domain_id);
1766 slpte = iotlb_entry->slpte;
1767 access_flags = iotlb_entry->access_flags;
1768 page_mask = iotlb_entry->mask;
1772 /* Try to fetch context-entry from cache first */
1773 if (cc_entry->context_cache_gen == s->context_cache_gen) {
1774 trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
1775 cc_entry->context_entry.lo,
1776 cc_entry->context_cache_gen);
1777 ce = cc_entry->context_entry;
1778 is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
1779 if (!is_fpd_set && s->root_scalable) {
1780 ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set);
1782 vtd_report_fault(s, -ret_fr, is_fpd_set,
1783 source_id, addr, is_write);
1788 ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
1789 is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
1790 if (!ret_fr && !is_fpd_set && s->root_scalable) {
1791 ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set);
1794 vtd_report_fault(s, -ret_fr, is_fpd_set,
1795 source_id, addr, is_write);
1798 /* Update context-cache */
1799 trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
1800 cc_entry->context_cache_gen,
1801 s->context_cache_gen);
1802 cc_entry->context_entry = ce;
1803 cc_entry->context_cache_gen = s->context_cache_gen;
1807 * We don't need to translate for pass-through context entries.
1808 * Also, let's ignore IOTLB caching for PT devices.
1810 if (vtd_dev_pt_enabled(s, &ce)) {
1811 entry->iova = addr & VTD_PAGE_MASK_4K;
1812 entry->translated_addr = entry->iova;
1813 entry->addr_mask = ~VTD_PAGE_MASK_4K;
1814 entry->perm = IOMMU_RW;
1815 trace_vtd_translate_pt(source_id, entry->iova);
1818 * When this happens, it means firstly caching-mode is not
1819 * enabled, and this is the first passthrough translation for
1820 * the device. Let's enable the fast path for passthrough.
1822 * When passthrough is disabled again for the device, we can
1823 * capture it via the context entry invalidation, then the
1824 * IOMMU region can be swapped back.
1826 vtd_pt_enable_fast_path(s, source_id);
1827 vtd_iommu_unlock(s);
1831 ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level,
1832 &reads, &writes, s->aw_bits);
1834 vtd_report_fault(s, -ret_fr, is_fpd_set, source_id,
1839 page_mask = vtd_slpt_level_page_mask(level);
1840 access_flags = IOMMU_ACCESS_FLAG(reads, writes);
1841 vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte,
1842 access_flags, level);
1844 vtd_iommu_unlock(s);
1845 entry->iova = addr & page_mask;
1846 entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
1847 entry->addr_mask = ~page_mask;
1848 entry->perm = access_flags;
1852 vtd_iommu_unlock(s);
1854 entry->translated_addr = 0;
1855 entry->addr_mask = 0;
1856 entry->perm = IOMMU_NONE;
1860 static void vtd_root_table_setup(IntelIOMMUState *s)
1862 s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
1863 s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits);
1865 vtd_update_scalable_state(s);
1867 trace_vtd_reg_dmar_root(s->root, s->root_scalable);
1870 static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
1871 uint32_t index, uint32_t mask)
1873 x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
1876 static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
1879 value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
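/* The IRTA size field X encodes 2^(X+1) interrupt remap table entries */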
1880 s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
1881 s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits);
1882 s->intr_eime = value & VTD_IRTA_EIME;
1884 /* Notify global invalidation */
1885 vtd_iec_notify_all(s, true, 0, 0);
1887 trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
1890 static void vtd_iommu_replay_all(IntelIOMMUState *s)
1892 VTDAddressSpace *vtd_as;
1894 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
1895 vtd_sync_shadow_page_table(vtd_as);
1899 static void vtd_context_global_invalidate(IntelIOMMUState *s)
1901 trace_vtd_inv_desc_cc_global();
1902 /* Protects context cache */
1904 s->context_cache_gen++;
1905 if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
1906 vtd_reset_context_cache_locked(s);
1908 vtd_iommu_unlock(s);
1909 vtd_address_space_refresh_all(s);
1911 * From VT-d spec 6.5.2.1, a global context entry invalidation
1912 * should be followed by an IOTLB global invalidation, so we should
1913 * be safe even without this. However, let's replay the region as
1914 * well to be safer, and revisit this when we need finer tuning of
1915 * the VT-d emulation code.
1917 vtd_iommu_replay_all(s);
1920 /* Do a context-cache device-selective invalidation.
1921 * @func_mask: FM field after shifting
1923 static void vtd_context_device_invalidate(IntelIOMMUState *s,
1927 GHashTableIter as_it;
1929 VTDAddressSpace *vtd_as;
1930 uint8_t bus_n, devfn;
1932 trace_vtd_inv_desc_cc_devices(source_id, func_mask);
1934 switch (func_mask & 3) {
1936 mask = 0; /* No bits in the SID field masked */
1939 mask = 4; /* Mask bit 2 in the SID field */
1942 mask = 6; /* Mask bit 2:1 in the SID field */
1945 mask = 7; /* Mask bit 2:0 in the SID field */
1948 g_assert_not_reached();
1952 bus_n = VTD_SID_TO_BUS(source_id);
1953 devfn = VTD_SID_TO_DEVFN(source_id);
1955 g_hash_table_iter_init(&as_it, s->vtd_address_spaces);
1956 while (g_hash_table_iter_next(&as_it, NULL, (void **)&vtd_as)) {
1957 if ((pci_bus_num(vtd_as->bus) == bus_n) &&
1958 (vtd_as->devfn & mask) == (devfn & mask)) {
1959 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(vtd_as->devfn),
1960 VTD_PCI_FUNC(vtd_as->devfn));
1962 vtd_as->context_cache_entry.context_cache_gen = 0;
1963 vtd_iommu_unlock(s);
1965 * Switch the address space when needed, in case the
1966 * device passthrough bit has been toggled.
1968 vtd_switch_address_space(vtd_as);
1970 * So a device is moving out of (or moving into) a
1971 * domain, resync the shadow page table.
1972 * This does no harm even if we have no such
1973 * notifier registered - the IOMMU notification
1974 * framework will skip MAP notifications if that
1977 vtd_sync_shadow_page_table(vtd_as);
1982 /* Context-cache invalidation
1983 * Returns the Context Actual Invalidation Granularity.
1984 * @val: the content of the CCMD_REG
1986 static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
1989 uint64_t type = val & VTD_CCMD_CIRG_MASK;
1992 case VTD_CCMD_DOMAIN_INVL:
1994 case VTD_CCMD_GLOBAL_INVL:
1995 caig = VTD_CCMD_GLOBAL_INVL_A;
1996 vtd_context_global_invalidate(s);
1999 case VTD_CCMD_DEVICE_INVL:
2000 caig = VTD_CCMD_DEVICE_INVL_A;
2001 vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
2005 error_report_once("%s: invalid context: 0x%" PRIx64,
2012 static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
2014 trace_vtd_inv_desc_iotlb_global();
2016 vtd_iommu_replay_all(s);
2019 static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
2022 VTDAddressSpace *vtd_as;
2024 trace_vtd_inv_desc_iotlb_domain(domain_id);
2027 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
2029 vtd_iommu_unlock(s);
2031 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
2032 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
2033 vtd_as->devfn, &ce) &&
2034 domain_id == vtd_get_domain_id(s, &ce)) {
2035 vtd_sync_shadow_page_table(vtd_as);
2040 static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
2041 uint16_t domain_id, hwaddr addr,
2044 VTDAddressSpace *vtd_as;
2047 hwaddr size = (1 << am) * VTD_PAGE_SIZE;
2049 QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) {
2050 ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
2051 vtd_as->devfn, &ce);
2052 if (!ret && domain_id == vtd_get_domain_id(s, &ce)) {
2053 if (vtd_as_has_map_notifier(vtd_as)) {
2055 * As long as we have MAP notifications registered in
2056 * any of our IOMMU notifiers, we need to sync the
2057 * shadow page table.
2059 vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size);
2062 * For UNMAP-only notifiers, we don't need to walk the
2063 * page tables. We just deliver the PSI down to
2064 * invalidate caches.
2066 IOMMUTLBEvent event = {
2067 .type = IOMMU_NOTIFIER_UNMAP,
2069 .target_as = &address_space_memory,
2071 .translated_addr = 0,
2072 .addr_mask = size - 1,
2076 memory_region_notify_iommu(&vtd_as->iommu, 0, event);
2082 static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
2083 hwaddr addr, uint8_t am)
2085 VTDIOTLBPageInvInfo info;
2087 trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
2089 assert(am <= VTD_MAMV);
2090 info.domain_id = domain_id;
2092 info.mask = ~((1 << am) - 1);
2094 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
2095 vtd_iommu_unlock(s);
2096 vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
2100 * Returns the IOTLB Actual Invalidation Granularity.
2101 * @val: the content of the IOTLB_REG
2103 static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
2106 uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
2112 case VTD_TLB_GLOBAL_FLUSH:
2113 iaig = VTD_TLB_GLOBAL_FLUSH_A;
2114 vtd_iotlb_global_invalidate(s);
2117 case VTD_TLB_DSI_FLUSH:
2118 domain_id = VTD_TLB_DID(val);
2119 iaig = VTD_TLB_DSI_FLUSH_A;
2120 vtd_iotlb_domain_invalidate(s, domain_id);
2123 case VTD_TLB_PSI_FLUSH:
2124 domain_id = VTD_TLB_DID(val);
2125 addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
2126 am = VTD_IVA_AM(addr);
2127 addr = VTD_IVA_ADDR(addr);
2128 if (am > VTD_MAMV) {
2129 error_report_once("%s: address mask overflow: 0x%" PRIx64,
2130 __func__, vtd_get_quad_raw(s, DMAR_IVA_REG));
2134 iaig = VTD_TLB_PSI_FLUSH_A;
2135 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
2139 error_report_once("%s: invalid granularity: 0x%" PRIx64,
2146 static void vtd_fetch_inv_desc(IntelIOMMUState *s);
2148 static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
2150 return s->qi_enabled && (s->iq_tail == s->iq_head) &&
2151 (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
2154 static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
2156 uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
2158 trace_vtd_inv_qi_enable(en);
2161 s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits);
2162 /* 2^(x+8) entries */
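/*
 * With 256-bit descriptors (iq_dw set) each slot is twice as big, so
 * the same queue holds half as many entries, hence the extra -1 below.
 */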
2163 s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0));
2164 s->qi_enabled = true;
2165 trace_vtd_inv_qi_setup(s->iq, s->iq_size);
2166 /* Ok - report back to driver */
2167 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
2169 if (s->iq_tail != 0) {
2171 * This is a spec violation but Windows guests are known to set up
2172 * Queued Invalidation this way so we allow the write and process
2173 * Invalidation Descriptors right away.
2175 trace_vtd_warn_invalid_qi_tail(s->iq_tail);
2176 if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
2177 vtd_fetch_inv_desc(s);
2181 if (vtd_queued_inv_disable_check(s)) {
2182 /* disable Queued Invalidation */
2183 vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
2185 s->qi_enabled = false;
2186 /* Ok - report back to driver */
2187 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
2189 error_report_once("%s: detected improper state when disable QI "
2190 "(head=0x%x, tail=0x%x, last_type=%d)",
2192 s->iq_head, s->iq_tail, s->iq_last_desc_type);
2197 /* Set Root Table Pointer */
2198 static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
2200 vtd_root_table_setup(s);
2201 /* Ok - report back to driver */
2202 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
2203 vtd_reset_caches(s);
2204 vtd_address_space_refresh_all(s);
2207 /* Set Interrupt Remap Table Pointer */
2208 static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
2210 vtd_interrupt_remap_table_setup(s);
2211 /* Ok - report back to driver */
2212 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
2215 /* Handle Translation Enable/Disable */
2216 static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
2218 if (s->dmar_enabled == en) {
2222 trace_vtd_dmar_enable(en);
2225 s->dmar_enabled = true;
2226 /* Ok - report back to driver */
2227 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
2229 s->dmar_enabled = false;
2231 /* Clear the index of Fault Recording Register */
2232 s->next_frcd_reg = 0;
2233 /* Ok - report back to driver */
2234 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
2237 vtd_reset_caches(s);
2238 vtd_address_space_refresh_all(s);
2241 /* Handle Interrupt Remap Enable/Disable */
2242 static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
2244 trace_vtd_ir_enable(en);
2247 s->intr_enabled = true;
2248 /* Ok - report back to driver */
2249 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
2251 s->intr_enabled = false;
2252 /* Ok - report back to driver */
2253 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
2257 /* Handle write to Global Command Register */
2258 static void vtd_handle_gcmd_write(IntelIOMMUState *s)
2260 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
2261 uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
2262 uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
2263 uint32_t changed = status ^ val;
2265 trace_vtd_reg_write_gcmd(status, val);
2266 if ((changed & VTD_GCMD_TE) && s->dma_translation) {
2267 /* Translation enable/disable */
2268 vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
2270 if (val & VTD_GCMD_SRTP) {
2271 /* Set/update the root-table pointer */
2272 vtd_handle_gcmd_srtp(s);
2274 if (changed & VTD_GCMD_QIE) {
2275 /* Queued Invalidation Enable */
2276 vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
2278 if (val & VTD_GCMD_SIRTP) {
2279 /* Set/update the interrupt remapping root-table pointer */
2280 vtd_handle_gcmd_sirtp(s);
2282 if ((changed & VTD_GCMD_IRE) &&
2283 x86_iommu_ir_supported(x86_iommu)) {
2284 /* Interrupt remap enable/disable */
2285 vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
2289 /* Handle write to Context Command Register */
2290 static void vtd_handle_ccmd_write(IntelIOMMUState *s)
2293 uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);
2295 /* Context-cache invalidation request */
2296 if (val & VTD_CCMD_ICC) {
2297 if (s->qi_enabled) {
2298 error_report_once("Queued Invalidation enabled, "
2299 "should not use register-based invalidation");
2302 ret = vtd_context_cache_invalidate(s, val);
2303 /* Invalidation completed: clear ICC and update CAIG to indicate completion */
2304 vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
2305 ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
2310 /* Handle write to IOTLB Invalidation Register */
2311 static void vtd_handle_iotlb_write(IntelIOMMUState *s)
2314 uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);
2316 /* IOTLB invalidation request */
2317 if (val & VTD_TLB_IVT) {
2318 if (s->qi_enabled) {
2319 error_report_once("Queued Invalidation enabled, "
2320 "should not use register-based invalidation");
2323 ret = vtd_iotlb_flush(s, val);
2324 /* Invalidation completed: clear IVT and update the IAIG field to indicate completion */
2325 vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
2326 ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
2327 VTD_TLB_FLUSH_GRANU_MASK_A, ret);
2331 /* Fetch an Invalidation Descriptor from the Invalidation Queue */
2332 static bool vtd_get_inv_desc(IntelIOMMUState *s,
2333 VTDInvDesc *inv_desc)
2335 dma_addr_t base_addr = s->iq;
2336 uint32_t offset = s->iq_head;
2337 uint32_t dw = s->iq_dw ? 32 : 16;
2338 dma_addr_t addr = base_addr + offset * dw;
2340 if (dma_memory_read(&address_space_memory, addr,
2341 inv_desc, dw, MEMTXATTRS_UNSPECIFIED)) {
2342 error_report_once("Read INV DESC failed.");
2345 inv_desc->lo = le64_to_cpu(inv_desc->lo);
2346 inv_desc->hi = le64_to_cpu(inv_desc->hi);
2348 inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]);
2349 inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]);
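/*
 * Process an Invalidation Wait Descriptor: either write Status Data to
 * the Status Address (Status Write) or generate an invalidation
 * completion event interrupt (Interrupt Flag).
 */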
2354 static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
2356 if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
2357 (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
2358 error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
2359 " (reserved nonzero)", __func__, inv_desc->hi,
2363 if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
2365 uint32_t status_data = (uint32_t)(inv_desc->lo >>
2366 VTD_INV_DESC_WAIT_DATA_SHIFT);
2368 assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
2370 /* FIXME: need to be masked with HAW? */
2371 dma_addr_t status_addr = inv_desc->hi;
2372 trace_vtd_inv_desc_wait_sw(status_addr, status_data);
2373 status_data = cpu_to_le32(status_data);
2374 if (dma_memory_write(&address_space_memory, status_addr,
2375 &status_data, sizeof(status_data),
2376 MEMTXATTRS_UNSPECIFIED)) {
2377 trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
2380 } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
2381 /* Interrupt flag */
2382 vtd_generate_completion_event(s);
2384 error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
2385 " (unknown type)", __func__, inv_desc->hi,
2392 static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
2393 VTDInvDesc *inv_desc)
2395 uint16_t sid, fmask;
2397 if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
2398 error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
2399 " (reserved nonzero)", __func__, inv_desc->hi,
2403 switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
2404 case VTD_INV_DESC_CC_DOMAIN:
2405 trace_vtd_inv_desc_cc_domain(
2406 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
2408 case VTD_INV_DESC_CC_GLOBAL:
2409 vtd_context_global_invalidate(s);
2412 case VTD_INV_DESC_CC_DEVICE:
2413 sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
2414 fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
2415 vtd_context_device_invalidate(s, sid, fmask);
2419 error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
2420 " (invalid type)", __func__, inv_desc->hi,
2427 static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
2433 if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
2434 (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
2435 error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
2436 ", lo=0x%"PRIx64" (reserved bits unzero)",
2437 __func__, inv_desc->hi, inv_desc->lo);
2441 switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
2442 case VTD_INV_DESC_IOTLB_GLOBAL:
2443 vtd_iotlb_global_invalidate(s);
2446 case VTD_INV_DESC_IOTLB_DOMAIN:
2447 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
2448 vtd_iotlb_domain_invalidate(s, domain_id);
2451 case VTD_INV_DESC_IOTLB_PAGE:
2452 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
2453 addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
2454 am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
2455 if (am > VTD_MAMV) {
2456 error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
2457 ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)",
2458 __func__, inv_desc->hi, inv_desc->lo,
2459 am, (unsigned)VTD_MAMV);
2462 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
2466 error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
2467 ", lo=0x%"PRIx64" (type mismatch: 0x%llx)",
2468 __func__, inv_desc->hi, inv_desc->lo,
2469 inv_desc->lo & VTD_INV_DESC_IOTLB_G);
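/* Process an Interrupt Entry Cache Invalidate Descriptor by notifying
 * all registered IEC notifiers about the affected index range. */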
2475 static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
2476 VTDInvDesc *inv_desc)
2478 trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
2479 inv_desc->iec.index,
2480 inv_desc->iec.index_mask);
2482 vtd_iec_notify_all(s, !inv_desc->iec.granularity,
2483 inv_desc->iec.index,
2484 inv_desc->iec.index_mask);
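/*
 * Process a Device-IOTLB Invalidate Descriptor: look up the address space
 * by source-id and emit a DEVIOTLB_UNMAP notification for the range.
 */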
2488 static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
2489 VTDInvDesc *inv_desc)
2491 VTDAddressSpace *vtd_dev_as;
2492 IOMMUTLBEvent event;
2498 addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
2499 sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
2500 size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
2502 if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
2503 (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
2504 error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
2505 ", lo=%"PRIx64" (reserved nonzero)", __func__,
2506 inv_desc->hi, inv_desc->lo);
2511 * Using sid is OK since the guest should have finished the
2512 * initialization of both the bus and device.
2514 vtd_dev_as = vtd_get_as_by_sid(s, sid);
2519 /* According to ATS spec table 2.4:
2520 * S = 0, bits 15:12 = xxxx range size: 4K
2521 * S = 1, bits 15:12 = xxx0 range size: 8K
2522 * S = 1, bits 15:12 = xx01 range size: 16K
2523 * S = 1, bits 15:12 = x011 range size: 32K
2524 * S = 1, bits 15:12 = 0111 range size: 64K
2528 sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
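/*
 * Worked example (assumed values): with S=1 and bits 15:12 = 0111b,
 * cto64(addr >> VTD_PAGE_SHIFT) is 3, so sz = 8K << 3 = 64K, matching
 * the ATS table above.
 */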
2534 event.type = IOMMU_NOTIFIER_DEVIOTLB_UNMAP;
2535 event.entry.target_as = &vtd_dev_as->as;
2536 event.entry.addr_mask = sz - 1;
2537 event.entry.iova = addr;
2538 event.entry.perm = IOMMU_NONE;
2539 event.entry.translated_addr = 0;
2540 memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
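/*
 * Fetch one descriptor from the queue head and dispatch it by type;
 * returns false on error so the caller can raise an Invalidation
 * Queue Error.
 */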
2546 static bool vtd_process_inv_desc(IntelIOMMUState *s)
2548 VTDInvDesc inv_desc;
2551 trace_vtd_inv_qi_head(s->iq_head);
2552 if (!vtd_get_inv_desc(s, &inv_desc)) {
2553 s->iq_last_desc_type = VTD_INV_DESC_NONE;
2557 desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
2558 /* FIXME: should iq_last_desc_type be updated before or after processing? */
2559 s->iq_last_desc_type = desc_type;
2561 switch (desc_type) {
2562 case VTD_INV_DESC_CC:
2563 trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
2564 if (!vtd_process_context_cache_desc(s, &inv_desc)) {
2569 case VTD_INV_DESC_IOTLB:
2570 trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
2571 if (!vtd_process_iotlb_desc(s, &inv_desc)) {
2577 * TODO: the two cases below are placeholders to be implemented in a
2578 * future series. To keep guests whose iommu driver integrates the
2579 * scalable mode support patch set working, returning true is enough for now.
2581 case VTD_INV_DESC_PC:
2584 case VTD_INV_DESC_PIOTLB:
2587 case VTD_INV_DESC_WAIT:
2588 trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
2589 if (!vtd_process_wait_desc(s, &inv_desc)) {
2594 case VTD_INV_DESC_IEC:
2595 trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
2596 if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
2601 case VTD_INV_DESC_DEVICE:
2602 trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
2603 if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
2609 error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64
2610 " (unknown type)", __func__, inv_desc.hi,
2615 if (s->iq_head == s->iq_size) {
2621 /* Try to fetch and process more Invalidation Descriptors */
2622 static void vtd_fetch_inv_desc(IntelIOMMUState *s)
2626 /* Refer to 10.4.23 of VT-d spec 3.0 */
2627 qi_shift = s->iq_dw ? VTD_IQH_QH_SHIFT_5 : VTD_IQH_QH_SHIFT_4;
2629 trace_vtd_inv_qi_fetch();
2631 if (s->iq_tail >= s->iq_size) {
2632 /* Detects an invalid Tail pointer */
2633 error_report_once("%s: detected invalid QI tail "
2634 "(tail=0x%x, size=0x%x)",
2635 __func__, s->iq_tail, s->iq_size);
2636 vtd_handle_inv_queue_error(s);
2639 while (s->iq_head != s->iq_tail) {
2640 if (!vtd_process_inv_desc(s)) {
2641 /* Invalidation Queue Errors */
2642 vtd_handle_inv_queue_error(s);
2645 /* Must update the IQH_REG in time */
2646 vtd_set_quad_raw(s, DMAR_IQH_REG,
2647 (((uint64_t)(s->iq_head)) << qi_shift) &
2652 /* Handle write to Invalidation Queue Tail Register */
2653 static void vtd_handle_iqt_write(IntelIOMMUState *s)
2655 uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);
2657 if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) {
2658 error_report_once("%s: RSV bit is set: val=0x%"PRIx64,
2662 s->iq_tail = VTD_IQT_QT(s->iq_dw, val);
2663 trace_vtd_inv_qi_tail(s->iq_tail);
2665 if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
2666 /* Process Invalidation Queue here */
2667 vtd_fetch_inv_desc(s);
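/* Handle write to Fault Status Register: withdraw the pending fault event
 * interrupt (FECTL_REG.IP) once no serviced fault status bits remain set. */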
2671 static void vtd_handle_fsts_write(IntelIOMMUState *s)
2673 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
2674 uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
2675 uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;
2677 if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
2678 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
2679 trace_vtd_fsts_clear_ip();
2681 /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
2682 * Descriptors if there are any when Queued Invalidation is enabled?
2686 static void vtd_handle_fectl_write(IntelIOMMUState *s)
2689 /* FIXME: when software clears the IM field, check the IP field. But do we
2690 * need to compare the old value and the new value to conclude that
2691 * software clears the IM field? Or just check if the IM field is zero?
2693 fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
2695 trace_vtd_reg_write_fectl(fectl_reg);
2697 if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
2698 vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
2699 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
2703 static void vtd_handle_ics_write(IntelIOMMUState *s)
2705 uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
2706 uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
2708 if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
2709 trace_vtd_reg_ics_clear_ip();
2710 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
2714 static void vtd_handle_iectl_write(IntelIOMMUState *s)
2717 /* FIXME: when software clears the IM field, check the IP field. But do we
2718 * need to compare the old value and the new value to conclude that
2719 * software clears the IM field? Or just check if the IM field is zero?
2721 iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
2723 trace_vtd_reg_write_iectl(iectl_reg);
2725 if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
2726 vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
2727 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
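/* MMIO read handler for the DMAR register set; out-of-range accesses
 * return all-ones, and the high halves of 64-bit registers are
 * special-cased. */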
2731 static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
2733 IntelIOMMUState *s = opaque;
2736 trace_vtd_reg_read(addr, size);
2738 if (addr + size > DMAR_REG_SIZE) {
2739 error_report_once("%s: MMIO over range: addr=0x%" PRIx64
2740 " size=0x%x", __func__, addr, size);
2741 return (uint64_t)-1;
2745 /* Root Table Address Register, 64-bit */
2746 case DMAR_RTADDR_REG:
2747 val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
2749 val = val & ((1ULL << 32) - 1);
2753 case DMAR_RTADDR_REG_HI:
2755 val = vtd_get_quad_raw(s, DMAR_RTADDR_REG) >> 32;
2758 /* Invalidation Queue Address Register, 64-bit */
2760 val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
2762 val = val & ((1ULL << 32) - 1);
2766 case DMAR_IQA_REG_HI:
2773 val = vtd_get_long(s, addr);
2775 val = vtd_get_quad(s, addr);
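/* MMIO write handler for the DMAR register set; applies the register's
 * write-mask semantics and dispatches to the per-register handlers. */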
2782 static void vtd_mem_write(void *opaque, hwaddr addr,
2783 uint64_t val, unsigned size)
2785 IntelIOMMUState *s = opaque;
2787 trace_vtd_reg_write(addr, size, val);
2789 if (addr + size > DMAR_REG_SIZE) {
2790 error_report_once("%s: MMIO over range: addr=0x%" PRIx64
2791 " size=0x%x", __func__, addr, size);
2796 /* Global Command Register, 32-bit */
2798 vtd_set_long(s, addr, val);
2799 vtd_handle_gcmd_write(s);
2802 /* Context Command Register, 64-bit */
2805 vtd_set_long(s, addr, val);
2807 vtd_set_quad(s, addr, val);
2808 vtd_handle_ccmd_write(s);
2812 case DMAR_CCMD_REG_HI:
2814 vtd_set_long(s, addr, val);
2815 vtd_handle_ccmd_write(s);
2818 /* IOTLB Invalidation Register, 64-bit */
2819 case DMAR_IOTLB_REG:
2821 vtd_set_long(s, addr, val);
2823 vtd_set_quad(s, addr, val);
2824 vtd_handle_iotlb_write(s);
2828 case DMAR_IOTLB_REG_HI:
2830 vtd_set_long(s, addr, val);
2831 vtd_handle_iotlb_write(s);
2834 /* Invalidate Address Register, 64-bit */
2837 vtd_set_long(s, addr, val);
2839 vtd_set_quad(s, addr, val);
2843 case DMAR_IVA_REG_HI:
2845 vtd_set_long(s, addr, val);
2848 /* Fault Status Register, 32-bit */
2851 vtd_set_long(s, addr, val);
2852 vtd_handle_fsts_write(s);
2855 /* Fault Event Control Register, 32-bit */
2856 case DMAR_FECTL_REG:
2858 vtd_set_long(s, addr, val);
2859 vtd_handle_fectl_write(s);
2862 /* Fault Event Data Register, 32-bit */
2863 case DMAR_FEDATA_REG:
2865 vtd_set_long(s, addr, val);
2868 /* Fault Event Address Register, 32-bit */
2869 case DMAR_FEADDR_REG:
2871 vtd_set_long(s, addr, val);
2874 * While the register is 32-bit only, some guests (Xen...) write to
2877 vtd_set_quad(s, addr, val);
2881 /* Fault Event Upper Address Register, 32-bit */
2882 case DMAR_FEUADDR_REG:
2884 vtd_set_long(s, addr, val);
2887 /* Protected Memory Enable Register, 32-bit */
2890 vtd_set_long(s, addr, val);
2893 /* Root Table Address Register, 64-bit */
2894 case DMAR_RTADDR_REG:
2896 vtd_set_long(s, addr, val);
2898 vtd_set_quad(s, addr, val);
2902 case DMAR_RTADDR_REG_HI:
2904 vtd_set_long(s, addr, val);
2907 /* Invalidation Queue Tail Register, 64-bit */
2910 vtd_set_long(s, addr, val);
2912 vtd_set_quad(s, addr, val);
2914 vtd_handle_iqt_write(s);
2917 case DMAR_IQT_REG_HI:
2919 vtd_set_long(s, addr, val);
2920 /* Bits 19:63 of IQT_REG are RsvdZ, do nothing here */
2923 /* Invalidation Queue Address Register, 64-bit */
2926 vtd_set_long(s, addr, val);
2928 vtd_set_quad(s, addr, val);
2930 vtd_update_iq_dw(s);
2933 case DMAR_IQA_REG_HI:
2935 vtd_set_long(s, addr, val);
2938 /* Invalidation Completion Status Register, 32-bit */
2941 vtd_set_long(s, addr, val);
2942 vtd_handle_ics_write(s);
2945 /* Invalidation Event Control Register, 32-bit */
2946 case DMAR_IECTL_REG:
2948 vtd_set_long(s, addr, val);
2949 vtd_handle_iectl_write(s);
2952 /* Invalidation Event Data Register, 32-bit */
2953 case DMAR_IEDATA_REG:
2955 vtd_set_long(s, addr, val);
2958 /* Invalidation Event Address Register, 32-bit */
2959 case DMAR_IEADDR_REG:
2961 vtd_set_long(s, addr, val);
2964 /* Invalidation Event Upper Address Register, 32-bit */
2965 case DMAR_IEUADDR_REG:
2967 vtd_set_long(s, addr, val);
2970 /* Fault Recording Registers, 128-bit */
2971 case DMAR_FRCD_REG_0_0:
2973 vtd_set_long(s, addr, val);
2975 vtd_set_quad(s, addr, val);
2979 case DMAR_FRCD_REG_0_1:
2981 vtd_set_long(s, addr, val);
2984 case DMAR_FRCD_REG_0_2:
2986 vtd_set_long(s, addr, val);
2988 vtd_set_quad(s, addr, val);
2989 /* May clear bit 127 (Fault), update PPF */
2990 vtd_update_fsts_ppf(s);
2994 case DMAR_FRCD_REG_0_3:
2996 vtd_set_long(s, addr, val);
2997 /* May clear bit 127 (Fault), update PPF */
2998 vtd_update_fsts_ppf(s);
3003 vtd_set_long(s, addr, val);
3005 vtd_set_quad(s, addr, val);
3009 case DMAR_IRTA_REG_HI:
3011 vtd_set_long(s, addr, val);
3016 vtd_set_long(s, addr, val);
3018 vtd_set_quad(s, addr, val);
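/* IOMMU translate callback; when DMAR is disabled the access is passed
 * through untranslated at 4K granularity. */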
3023 static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
3024 IOMMUAccessFlags flag, int iommu_idx)
3026 VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
3027 IntelIOMMUState *s = vtd_as->iommu_state;
3028 IOMMUTLBEntry iotlb = {
3029 /* We'll fill in the rest later. */
3030 .target_as = &address_space_memory,
3034 if (likely(s->dmar_enabled)) {
3035 success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
3036 addr, flag & IOMMU_WO, &iotlb);
3038 /* DMAR disabled, passthrough, use 4k page */
3039 iotlb.iova = addr & VTD_PAGE_MASK_4K;
3040 iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
3041 iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
3042 iotlb.perm = IOMMU_RW;
3046 if (likely(success)) {
3047 trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
3048 VTD_PCI_SLOT(vtd_as->devfn),
3049 VTD_PCI_FUNC(vtd_as->devfn),
3050 iotlb.iova, iotlb.translated_addr,
3053 error_report_once("%s: detected translation failure "
3054 "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")",
3055 __func__, pci_bus_num(vtd_as->bus),
3056 VTD_PCI_SLOT(vtd_as->devfn),
3057 VTD_PCI_FUNC(vtd_as->devfn),
3064 static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
3065 IOMMUNotifierFlag old,
3066 IOMMUNotifierFlag new,
3069 VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
3070 IntelIOMMUState *s = vtd_as->iommu_state;
3072 /* TODO: add support for VFIO and vhost users */
3073 if (s->snoop_control) {
3074 error_setg_errno(errp, ENOTSUP,
3075 "Snoop Control with vhost or VFIO is not supported");
3079 /* Update per-address-space notifier flags */
3080 vtd_as->notifier_flags = new;
3082 if (old == IOMMU_NOTIFIER_NONE) {
3083 QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next);
3084 } else if (new == IOMMU_NOTIFIER_NONE) {
3085 QLIST_REMOVE(vtd_as, next);
3090 static int vtd_post_load(void *opaque, int version_id)
3092 IntelIOMMUState *iommu = opaque;
3095 * We don't need to migrate root_scalable because we can simply
3096 * recalculate it after loading completes. We could actually do
3097 * similar things with root, dmar_enabled, etc.; however, since
3098 * those are already migrated, we keep them as-is
3099 * for migration compatibility.
3101 vtd_update_scalable_state(iommu);
3103 vtd_update_iq_dw(iommu);
3106 * Memory regions are dynamically turned on/off depending on
3107 * context entry configurations from the guest. After migration,
3108 * we need to make sure the memory regions are still correct.
3110 vtd_switch_address_space_all(iommu);
3115 static const VMStateDescription vtd_vmstate = {
3116 .name = "iommu-intel",
3118 .minimum_version_id = 1,
3119 .priority = MIG_PRI_IOMMU,
3120 .post_load = vtd_post_load,
3121 .fields = (VMStateField[]) {
3122 VMSTATE_UINT64(root, IntelIOMMUState),
3123 VMSTATE_UINT64(intr_root, IntelIOMMUState),
3124 VMSTATE_UINT64(iq, IntelIOMMUState),
3125 VMSTATE_UINT32(intr_size, IntelIOMMUState),
3126 VMSTATE_UINT16(iq_head, IntelIOMMUState),
3127 VMSTATE_UINT16(iq_tail, IntelIOMMUState),
3128 VMSTATE_UINT16(iq_size, IntelIOMMUState),
3129 VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
3130 VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
3131 VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
3132 VMSTATE_UNUSED(1), /* bool root_extended is obsoleted by VT-d */
3133 VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
3134 VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
3135 VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
3136 VMSTATE_BOOL(intr_eime, IntelIOMMUState),
3137 VMSTATE_END_OF_LIST()
3141 static const MemoryRegionOps vtd_mem_ops = {
3142 .read = vtd_mem_read,
3143 .write = vtd_mem_write,
3144 .endianness = DEVICE_LITTLE_ENDIAN,
3146 .min_access_size = 4,
3147 .max_access_size = 8,
3150 .min_access_size = 4,
3151 .max_access_size = 8,
3155 static Property vtd_properties[] = {
3156 DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
3157 DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
3159 DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
3160 DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits,
3161 VTD_HOST_ADDRESS_WIDTH),
3162 DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
3163 DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE),
3164 DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
3165 DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
3166 DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true),
3167 DEFINE_PROP_END_OF_LIST(),
3170 /* Read IRTE entry with specific index */
3171 static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
3172 VTD_IR_TableEntry *entry, uint16_t sid)
3174 static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
3175 {0xffff, 0xfffb, 0xfff9, 0xfff8};
3176 dma_addr_t addr = 0x00;
3177 uint16_t mask, source_id;
3178 uint8_t bus, bus_max, bus_min;
3180 if (index >= iommu->intr_size) {
3181 error_report_once("%s: index too large: ind=0x%x",
3183 return -VTD_FR_IR_INDEX_OVER;
3186 addr = iommu->intr_root + index * sizeof(*entry);
3187 if (dma_memory_read(&address_space_memory, addr,
3188 entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) {
3189 error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
3190 __func__, index, addr);
3191 return -VTD_FR_IR_ROOT_INVAL;
3194 trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
3195 le64_to_cpu(entry->data[0]));
3197 if (!entry->irte.present) {
3198 error_report_once("%s: detected non-present IRTE "
3199 "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
3200 __func__, index, le64_to_cpu(entry->data[1]),
3201 le64_to_cpu(entry->data[0]));
3202 return -VTD_FR_IR_ENTRY_P;
3205 if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
3206 entry->irte.__reserved_2) {
3207 error_report_once("%s: detected non-zero reserved IRTE "
3208 "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
3209 __func__, index, le64_to_cpu(entry->data[1]),
3210 le64_to_cpu(entry->data[0]));
3211 return -VTD_FR_IR_IRTE_RSVD;
3214 if (sid != X86_IOMMU_SID_INVALID) {
3215 /* Validate IRTE SID */
3216 source_id = le32_to_cpu(entry->irte.source_id);
3217 switch (entry->irte.sid_vtype) {
3222 mask = vtd_svt_mask[entry->irte.sid_q];
3223 if ((source_id & mask) != (sid & mask)) {
3224 error_report_once("%s: invalid IRTE SID "
3225 "(index=%u, sid=%u, source_id=%u)",
3226 __func__, index, sid, source_id);
3227 return -VTD_FR_IR_SID_ERR;
3232 bus_max = source_id >> 8;
3233 bus_min = source_id & 0xff;
3235 if (bus > bus_max || bus < bus_min) {
3236 error_report_once("%s: invalid SVT_BUS "
3237 "(index=%u, bus=%u, min=%u, max=%u)",
3238 __func__, index, bus, bus_min, bus_max);
3239 return -VTD_FR_IR_SID_ERR;
3244 error_report_once("%s: detected invalid IRTE SVT "
3245 "(index=%u, type=%d)", __func__,
3246 index, entry->irte.sid_vtype);
3247 /* Take this as verification failure. */
3248 return -VTD_FR_IR_SID_ERR;
3255 /* Fetch IRQ information of specific IR index */
3256 static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
3257 X86IOMMUIrq *irq, uint16_t sid)
3259 VTD_IR_TableEntry irte = {};
3262 ret = vtd_irte_get(iommu, index, &irte, sid);
3267 irq->trigger_mode = irte.irte.trigger_mode;
3268 irq->vector = irte.irte.vector;
3269 irq->delivery_mode = irte.irte.delivery_mode;
3270 irq->dest = le32_to_cpu(irte.irte.dest_id);
3271 if (!iommu->intr_eime) {
3272 #define VTD_IR_APIC_DEST_MASK (0xff00ULL)
3273 #define VTD_IR_APIC_DEST_SHIFT (8)
3274 irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
3275 VTD_IR_APIC_DEST_SHIFT;
3277 irq->dest_mode = irte.irte.dest_mode;
3278 irq->redir_hint = irte.irte.redir_hint;
3280 trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
3281 irq->delivery_mode, irq->dest, irq->dest_mode);
3286 /* Interrupt remapping for MSI/MSI-X entry */
3287 static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
3289 MSIMessage *translated,
3293 VTD_IR_MSIAddress addr;
3295 X86IOMMUIrq irq = {};
3297 assert(origin && translated);
3299 trace_vtd_ir_remap_msi_req(origin->address, origin->data);
3301 if (!iommu || !iommu->intr_enabled) {
3302 memcpy(translated, origin, sizeof(*origin));
3306 if (origin->address & VTD_MSI_ADDR_HI_MASK) {
3307 error_report_once("%s: MSI address high 32 bits non-zero detected: "
3308 "address=0x%" PRIx64, __func__, origin->address);
3309 return -VTD_FR_IR_REQ_RSVD;
3312 addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
3313 if (addr.addr.__head != 0xfee) {
3314 error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
3315 __func__, addr.data);
3316 return -VTD_FR_IR_REQ_RSVD;
3319 /* This is compatible mode. */
3320 if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
3321 memcpy(translated, origin, sizeof(*origin));
3325 index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
3327 #define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff)
3328 #define VTD_IR_MSI_DATA_RESERVED (0xffff0000)
3330 if (addr.addr.sub_valid) {
3331 /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
3332 index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
3335 ret = vtd_remap_irq_get(iommu, index, &irq, sid);
3340 if (addr.addr.sub_valid) {
3341 trace_vtd_ir_remap_type("MSI");
3342 if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
3343 error_report_once("%s: invalid IR MSI "
3344 "(sid=%u, address=0x%" PRIx64
3345 ", data=0x%" PRIx32 ")",
3346 __func__, sid, origin->address, origin->data);
3347 return -VTD_FR_IR_REQ_RSVD;
3350 uint8_t vector = origin->data & 0xff;
3351 uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
3353 trace_vtd_ir_remap_type("IOAPIC");
3354 /* IOAPIC entry vector should be aligned with IRTE vector
3355 * (see vt-d spec 5.1.5.1). */
3356 if (vector != irq.vector) {
3357 trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
3360 /* The Trigger Mode field must match the Trigger Mode in the IRTE.
3361 * (see vt-d spec 5.1.5.1). */
3362 if (trigger_mode != irq.trigger_mode) {
3363 trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
3369 * Keep the last two bits, assuming that the guest OS
3370 * might have modified them; preserving them does no harm.
3372 irq.msi_addr_last_bits = addr.addr.__not_care;
3374 /* Translate X86IOMMUIrq to MSI message */
3375 x86_iommu_irq_to_msi_message(&irq, translated);
3378 trace_vtd_ir_remap_msi(origin->address, origin->data,
3379 translated->address, translated->data);
3383 static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
3384 MSIMessage *dst, uint16_t sid)
3386 return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
3390 static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
3391 uint64_t *data, unsigned size,
3397 static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
3398 uint64_t value, unsigned size,
3402 MSIMessage from = {}, to = {};
3403 uint16_t sid = X86_IOMMU_SID_INVALID;
3405 from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
3406 from.data = (uint32_t) value;
3408 if (!attrs.unspecified) {
3409 /* We have explicit Source ID */
3410 sid = attrs.requester_id;
3413 ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
3415 /* TODO: report error */
3416 /* Drop this interrupt */
3420 apic_get_class()->send_msi(&to);
3425 static const MemoryRegionOps vtd_mem_ir_ops = {
3426 .read_with_attrs = vtd_mem_ir_read,
3427 .write_with_attrs = vtd_mem_ir_write,
3428 .endianness = DEVICE_LITTLE_ENDIAN,
3430 .min_access_size = 4,
3431 .max_access_size = 4,
3434 .min_access_size = 4,
3435 .max_access_size = 4,
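/* Look up the VTDAddressSpace for a (bus, devfn) pair, creating and
 * initializing it on first use. */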
3439 VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
3442 * We can't simply use sid here since the bus number might not be
3443 * initialized by the guest.
3445 struct vtd_as_key key = {
3449 VTDAddressSpace *vtd_dev_as;
3452 vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key);
3454 struct vtd_as_key *new_key = g_malloc(sizeof(*new_key));
3457 new_key->devfn = devfn;
3459 snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn),
3461 vtd_dev_as = g_new0(VTDAddressSpace, 1);
3463 vtd_dev_as->bus = bus;
3464 vtd_dev_as->devfn = (uint8_t)devfn;
3465 vtd_dev_as->iommu_state = s;
3466 vtd_dev_as->context_cache_entry.context_cache_gen = 0;
3467 vtd_dev_as->iova_tree = iova_tree_new();
3469 memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX);
3470 address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root");
3473 * Build the DMAR-disabled container with aliases to the
3474 * shared MRs. Note that aliasing to a shared memory region
3475 * helps the memory API detect identical FlatViews, so devices
3476 * can share the same FlatView when DMAR is disabled (either by
3477 * not providing "intel_iommu=on" or with "iommu=pt"). This
3478 * greatly reduces the total number of FlatViews in the system
3479 * and hence makes the VM run faster.
3481 memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s),
3482 "vtd-nodmar", &s->mr_nodmar, 0,
3483 memory_region_size(&s->mr_nodmar));
3486 * Build the per-device DMAR-enabled container.
3488 * TODO: currently we have a per-device IOMMU memory region only
3489 * because we have per-device IOMMU notifiers. If one day we can
3490 * abstract the IOMMU notifiers out of the memory regions, then we
3491 * can also share the same memory region here, just like what we've
3492 * done above with the nodmar region.
3495 strcat(name, "-dmar");
3496 memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
3497 TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
3499 memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir",
3500 &s->mr_ir, 0, memory_region_size(&s->mr_ir));
3501 memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu),
3502 VTD_INTERRUPT_ADDR_FIRST,
3503 &vtd_dev_as->iommu_ir, 1);
3506 * Hook both containers under the root container; we switch
3507 * between DMAR & noDMAR by enabling/disabling the
3508 * corresponding sub-containers.
3510 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
3511 MEMORY_REGION(&vtd_dev_as->iommu),
3513 memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
3514 &vtd_dev_as->nodmar, 0);
3516 vtd_switch_address_space(vtd_dev_as);
3518 g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as);
3523 /* Unmap the whole range in the notifier's scope. */
3524 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
3526 hwaddr size, remain;
3527 hwaddr start = n->start;
3528 hwaddr end = n->end;
3529 IntelIOMMUState *s = as->iommu_state;
3533 * Note: all the code in this function assumes that the IOVA
3534 * width is no more than VTD_MGAW bits (as restricted by the
3535 * VT-d spec); otherwise we would need to consider 64-bit overflow.
3538 if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) {
3540 * No need to unmap regions that are bigger than the whole
3541 * VT-d supported address space size.
3543 end = VTD_ADDRESS_SIZE(s->aw_bits) - 1;
3546 assert(start <= end);
3547 size = remain = end - start + 1;
3549 while (remain >= VTD_PAGE_SIZE) {
3550 IOMMUTLBEvent event;
3551 uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
3552 uint64_t size = mask + 1;
3556 event.type = IOMMU_NOTIFIER_UNMAP;
3557 event.entry.iova = start;
3558 event.entry.addr_mask = mask;
3559 event.entry.target_as = &address_space_memory;
3560 event.entry.perm = IOMMU_NONE;
3561 /* This field is meaningless for unmap */
3562 event.entry.translated_addr = 0;
3564 memory_region_notify_iommu_one(n, &event);
3572 trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
3573 VTD_PCI_SLOT(as->devfn),
3574 VTD_PCI_FUNC(as->devfn),
3577 map.iova = n->start;
3579 iova_tree_remove(as->iova_tree, map);
3582 static void vtd_address_space_unmap_all(IntelIOMMUState *s)
3584 VTDAddressSpace *vtd_as;
3587 QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
3588 IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
3589 vtd_address_space_unmap(vtd_as, n);
3594 static void vtd_address_space_refresh_all(IntelIOMMUState *s)
3596 vtd_address_space_unmap_all(s);
3597 vtd_switch_address_space_all(s);
3600 static int vtd_replay_hook(IOMMUTLBEvent *event, void *private)
3602 memory_region_notify_iommu_one(private, event);
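/* Replay callback: drop existing mappings for the notifier's range, then
 * re-walk the page tables to re-send mappings if the context entry is
 * valid and the notifier wants MAP events. */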
3606 static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
3608 VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
3609 IntelIOMMUState *s = vtd_as->iommu_state;
3610 uint8_t bus_n = pci_bus_num(vtd_as->bus);
3614 * The replay can be triggered either by an invalidation or by a
3615 * newly created entry. Either way, we release the existing mappings
3616 * first (which means flushing caches for UNMAP-only notifiers).
3618 vtd_address_space_unmap(vtd_as, n);
3620 if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
3621 trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" :
3623 bus_n, PCI_SLOT(vtd_as->devfn),
3624 PCI_FUNC(vtd_as->devfn),
3625 vtd_get_domain_id(s, &ce),
3627 if (vtd_as_has_map_notifier(vtd_as)) {
3628 /* This is required only for MAP typed notifiers */
3629 vtd_page_walk_info info = {
3630 .hook_fn = vtd_replay_hook,
3631 .private = (void *)n,
3632 .notify_unmap = false,
3635 .domain_id = vtd_get_domain_id(s, &ce),
3638 vtd_page_walk(s, &ce, 0, ~0ULL, &info);
3641 trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
3642 PCI_FUNC(vtd_as->devfn));
3648 /* Do the initialization. It is also called on reset, so pay
3649 * attention when adding new initialization code.
3651 static void vtd_init(IntelIOMMUState *s)
3653 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
3655 memset(s->csr, 0, DMAR_REG_SIZE);
3656 memset(s->wmask, 0, DMAR_REG_SIZE);
3657 memset(s->w1cmask, 0, DMAR_REG_SIZE);
3658 memset(s->womask, 0, DMAR_REG_SIZE);
3661 s->root_scalable = false;
3662 s->dmar_enabled = false;
3663 s->intr_enabled = false;
3668 s->qi_enabled = false;
3669 s->iq_last_desc_type = VTD_INV_DESC_NONE;
3671 s->next_frcd_reg = 0;
3672 s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND |
3673 VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS |
3674 VTD_CAP_MGAW(s->aw_bits);
3676 s->cap |= VTD_CAP_DRAIN;
3678 if (s->dma_translation) {
3679 if (s->aw_bits >= VTD_HOST_AW_39BIT) {
3680 s->cap |= VTD_CAP_SAGAW_39bit;
3682 if (s->aw_bits >= VTD_HOST_AW_48BIT) {
3683 s->cap |= VTD_CAP_SAGAW_48bit;
3686 s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
3689 * Rsvd field masks for spte
3691 vtd_spte_rsvd[0] = ~0ULL;
3692 vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits,
3693 x86_iommu->dt_supported);
3694 vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
3695 vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
3696 vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
3698 vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits,
3699 x86_iommu->dt_supported);
3700 vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits,
3701 x86_iommu->dt_supported);
3703 if (s->scalable_mode || s->snoop_control) {
3704 vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP;
3705 vtd_spte_rsvd_large[2] &= ~VTD_SPTE_SNP;
3706 vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP;
3709 if (x86_iommu_ir_supported(x86_iommu)) {
3710 s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
3711 if (s->intr_eim == ON_OFF_AUTO_ON) {
3712 s->ecap |= VTD_ECAP_EIM;
3714 assert(s->intr_eim != ON_OFF_AUTO_AUTO);
3717 if (x86_iommu->dt_supported) {
3718 s->ecap |= VTD_ECAP_DT;
3721 if (x86_iommu->pt_supported) {
3722 s->ecap |= VTD_ECAP_PT;
3725 if (s->caching_mode) {
3726 s->cap |= VTD_CAP_CM;
3729 /* TODO: read cap/ecap from the host to decide which caps to expose. */
3730 if (s->scalable_mode) {
3731 s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS;
3734 if (s->snoop_control) {
3735 s->ecap |= VTD_ECAP_SC;
3738 vtd_reset_caches(s);
3740 /* Define registers with default values and bit semantics */
3741 vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
3742 vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
3743 vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
3744 vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
3745 vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
3746 vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
3747 vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0);
3748 vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
3749 vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);
3751 /* Advanced Fault Logging not supported */
3752 vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
3753 vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
3754 vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
3755 vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);
3757 /* Treated as RsvdZ when EIM in ECAP_REG is not supported
3758 * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
3760 vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);
3762 /* Treated as RO for implementations that report the PLMR and PHMR
3763 * fields as Clear in the CAP_REG.
3764 * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
3766 vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);
3768 vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
3769 vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
3770 vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0);
3771 vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
3772 vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
3773 vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
3774 vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
3775 /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
3776 vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);
3778 /* IOTLB registers */
3779 vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
3780 vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
3781 vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);
3783 /* Fault Recording Registers, 128-bit */
3784 vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
3785 vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);
3788 * Interrupt remapping registers.
3790 vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
3793 /* Do not reset address_spaces on reset, because devices will keep using
3794 * the address space they got at first (they won't ask the bus again).
3796 static void vtd_reset(DeviceState *dev)
3798 IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
3801 vtd_address_space_refresh_all(s);
3804 static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
3806 IntelIOMMUState *s = opaque;
3807 VTDAddressSpace *vtd_as;
3809 assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
3811 vtd_as = vtd_find_add_as(s, bus, devfn);
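/* Validate the device configuration (EIM mode, address width, scalable
 * mode constraints) before realize; returns false and sets errp on error. */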
3815 static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
3817 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
3819 if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) {
3820 error_setg(errp, "eim=on cannot be selected without intremap=on");
3824 if (s->intr_eim == ON_OFF_AUTO_AUTO) {
3825 s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
3826 && x86_iommu_ir_supported(x86_iommu) ?
3827 ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
3829 if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
3830 if (!kvm_irqchip_is_split()) {
3831 error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
3834 if (!kvm_enable_x2apic()) {
3835 error_setg(errp, "eim=on requires support on the KVM side"
3836 "(X2APIC_API, first shipped in v4.7)");
3841 /* Currently only address widths supported are 39 and 48 bits */
3842 if ((s->aw_bits != VTD_HOST_AW_39BIT) &&
3843 (s->aw_bits != VTD_HOST_AW_48BIT)) {
3844 error_setg(errp, "Supported values for aw-bits are: %d, %d",
3845 VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT);
3849 if (s->scalable_mode && !s->dma_drain) {
3850 error_setg(errp, "Need to set dma_drain for scalable mode");
3857 static int vtd_machine_done_notify_one(Object *child, void *unused)
3859 IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default());
3862 * We hard-code vfio-pci here because it is the only special case.
3863 * Let's be more elegant in the future when we can, but so far
3864 * there seems to be no better way.
3866 if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) {
3867 vtd_panic_require_caching_mode();
3873 static void vtd_machine_done_hook(Notifier *notifier, void *unused)
3875 object_child_foreach_recursive(object_get_root(),
3876 vtd_machine_done_notify_one, NULL);
3879 static Notifier vtd_machine_done_notify = {
3880 .notify = vtd_machine_done_hook,
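/* Realize: validate configuration, set up the CSR MMIO region and the
 * shared memory regions, register the IOMMU hook on the root PCI bus and
 * map the registers at the Q35 host bridge IOMMU address. */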
3883 static void vtd_realize(DeviceState *dev, Error **errp)
3885 MachineState *ms = MACHINE(qdev_get_machine());
3886 PCMachineState *pcms = PC_MACHINE(ms);
3887 X86MachineState *x86ms = X86_MACHINE(ms);
3888 PCIBus *bus = pcms->bus;
3889 IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
3891 if (!vtd_decide_config(s, errp)) {
3895 QLIST_INIT(&s->vtd_as_with_notifiers);
3896 qemu_mutex_init(&s->iommu_lock);
3897 memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
3898 "intel_iommu", DMAR_REG_SIZE);
3900 /* Create the memory regions shared by all devices */
3901 memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar",
3903 memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops,
3904 s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE);
3905 memory_region_init_alias(&s->mr_sys_alias, OBJECT(s),
3906 "vtd-sys-alias", get_system_memory(), 0,
3907 memory_region_size(get_system_memory()));
3908 memory_region_add_subregion_overlap(&s->mr_nodmar, 0,
3909 &s->mr_sys_alias, 0);
3910 memory_region_add_subregion_overlap(&s->mr_nodmar,
3911 VTD_INTERRUPT_ADDR_FIRST,
3914 sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
3915 /* No corresponding destroy */
3916 s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
3918 s->vtd_address_spaces = g_hash_table_new_full(vtd_as_hash, vtd_as_equal,
3921 sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
3922 pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
3923 /* Pseudo address space under root PCI bus. */
3924 x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
3925 qemu_add_machine_init_done_notifier(&vtd_machine_done_notify);
3928 static void vtd_class_init(ObjectClass *klass, void *data)
3930 DeviceClass *dc = DEVICE_CLASS(klass);
3931 X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass);
3933 dc->reset = vtd_reset;
3934 dc->vmsd = &vtd_vmstate;
3935 device_class_set_props(dc, vtd_properties);
3936 dc->hotpluggable = false;
3937 x86_class->realize = vtd_realize;
3938 x86_class->int_remap = vtd_int_remap;
3939 /* Supported by the pc-q35-* machine types */
3940 dc->user_creatable = true;
3941 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
3942 dc->desc = "Intel IOMMU (VT-d) DMA Remapping device";
3945 static const TypeInfo vtd_info = {
3946 .name = TYPE_INTEL_IOMMU_DEVICE,
3947 .parent = TYPE_X86_IOMMU_DEVICE,
3948 .instance_size = sizeof(IntelIOMMUState),
3949 .class_init = vtd_class_init,
3952 static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
3955 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
3957 imrc->translate = vtd_iommu_translate;
3958 imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
3959 imrc->replay = vtd_iommu_replay;
3962 static const TypeInfo vtd_iommu_memory_region_info = {
3963 .parent = TYPE_IOMMU_MEMORY_REGION,
3964 .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
3965 .class_init = vtd_iommu_memory_region_class_init,
3968 static void vtd_register_types(void)
3970 type_register_static(&vtd_info);
3971 type_register_static(&vtd_iommu_memory_region_info);
3974 type_init(vtd_register_types)