iommu/amd: Set up data structures for flush queue
author Joerg Roedel <jroedel@suse.de>
Wed, 6 Jul 2016 09:55:37 +0000 (11:55 +0200)
committer Joerg Roedel <jroedel@suse.de>
Wed, 13 Jul 2016 10:48:35 +0000 (12:48 +0200)
The flush queue is the equivalent of deferred flushing in the
Intel VT-d driver. This patch sets up the data structures
needed for this.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
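
Note that this patch only allocates and initializes the per-CPU queues; nothing enqueues or drains entries yet. As a rough illustration of how the structures below are meant to be used, a follow-up change could add entries along the lines of the sketch here. The helper name queue_add() and the flush-when-full handling are assumptions for illustration only, not part of this commit.

/*
 * Illustrative sketch only (not in this patch): enqueue an IOVA range
 * on the current CPU's flush queue so the IOTLB flush can be deferred.
 */
static void queue_add(struct dma_ops_domain *dma_dom,
		      unsigned long iova_pfn, unsigned long pages)
{
	struct flush_queue_entry *entry;
	struct flush_queue *queue;
	unsigned long flags;

	queue = get_cpu_ptr(&flush_queue);
	spin_lock_irqsave(&queue->lock, flags);

	if (queue->next == FLUSH_QUEUE_SIZE) {
		/*
		 * Queue full: a real implementation would flush the
		 * IOTLB here before reusing the slots (assumed policy).
		 */
		queue->next = 0;
	}

	entry = queue->entries + queue->next++;
	entry->iova_pfn = iova_pfn;
	entry->pages    = pages;
	entry->dma_dom  = dma_dom;

	spin_unlock_irqrestore(&queue->lock, flags);
	put_cpu_ptr(&flush_queue);
}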
drivers/iommu/amd_iommu.c

index d218e35..38f8a5e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -89,6 +89,22 @@ LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
 
+#define FLUSH_QUEUE_SIZE 256
+
+struct flush_queue_entry {
+       unsigned long iova_pfn;
+       unsigned long pages;
+       struct dma_ops_domain *dma_dom;
+};
+
+struct flush_queue {
+       spinlock_t lock;
+       unsigned next;
+       struct flush_queue_entry *entries;
+};
+
+DEFINE_PER_CPU(struct flush_queue, flush_queue);
+
 /*
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
@@ -2508,7 +2524,7 @@ static int init_reserved_iova_ranges(void)
 
 int __init amd_iommu_init_api(void)
 {
-       int ret, err = 0;
+       int ret, cpu, err = 0;
 
        ret = iova_cache_get();
        if (ret)
@@ -2518,6 +2534,18 @@ int __init amd_iommu_init_api(void)
        if (ret)
                return ret;
 
+       for_each_possible_cpu(cpu) {
+               struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+               queue->entries = kzalloc(FLUSH_QUEUE_SIZE *
+                                        sizeof(*queue->entries),
+                                        GFP_KERNEL);
+               if (!queue->entries)
+                       goto out_put_iova;
+
+               spin_lock_init(&queue->lock);
+       }
+
        err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
        if (err)
                return err;
@@ -2530,6 +2558,15 @@ int __init amd_iommu_init_api(void)
        if (err)
                return err;
        return 0;
+
+out_put_iova:
+       for_each_possible_cpu(cpu) {
+               struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+               kfree(queue->entries);
+       }
+
+       return -ENOMEM;
 }
 
 int __init amd_iommu_init_dma_ops(void)
@@ -2552,6 +2589,7 @@ int __init amd_iommu_init_dma_ops(void)
                pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
        return 0;
+
 }
 
 /*****************************************************************************