iommu: Add a capability for flush queue support
author Robin Murphy <robin.murphy@arm.com>
Thu, 4 May 2023 21:10:55 +0000 (22:10 +0100)
committer Joerg Roedel <jroedel@suse.de>
Mon, 22 May 2023 15:38:44 +0000 (17:38 +0200)

Passing a special type to domain_alloc to indirectly query whether flush
queues are a worthwhile optimisation with the given driver is a bit
clunky, and looks increasingly anachronistic. Let's put that into an
explicit capability instead.
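
For illustration, a minimal sketch of the consumer side: instead of
speculatively allocating an IOMMU_DOMAIN_DMA_FQ domain to discover
whether flush queues are worthwhile, core code can now ask the driver
through the existing device_iommu_capable() helper. The function name
iommu_dma_wants_fq() below is hypothetical, used only for this sketch:

    #include <linux/device.h>
    #include <linux/iommu.h>

    /*
     * Sketch only: decide whether a non-strict DMA flush queue is
     * worth setting up by querying the driver's capability directly,
     * rather than probing domain_alloc with a special domain type.
     */
    static bool iommu_dma_wants_fq(struct device *dev)
    {
            return device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH);
    }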

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Jerry Snitselaar <jsnitsel@redhat.com> # amd, intel, smmu-v3
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/f0086a93dbccb92622e1ace775846d81c1c4b174.1683233867.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/iommu.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/intel/iommu.c
include/linux/iommu.h

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 4a31464..9b7bd6b 100644
@@ -2293,6 +2293,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
                return amdr_ivrs_remap_support;
        case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
                return true;
+       case IOMMU_CAP_DEFERRED_FLUSH:
+               return true;
        default:
                break;
        }
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 3fd83fb..6d65a7e 100644
@@ -2008,6 +2008,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
                /* Assume that a coherent TCU implies coherent TBUs */
                return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
        case IOMMU_CAP_NOEXEC:
+       case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        default:
                return false;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 6e0813b..7f4ee36 100644
@@ -1325,6 +1325,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
                return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
                        device_get_dma_attr(dev) == DEV_DMA_COHERENT;
        case IOMMU_CAP_NOEXEC:
+       case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        default:
                return false;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b871a6a..ff92329 100644
@@ -4369,6 +4369,7 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
 
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
+       case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
        case IOMMU_CAP_PRE_BOOT_PROTECTION:
                return dmar_platform_optin();
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e8c9a7d..1b7180d 100644
@@ -127,6 +127,11 @@ enum iommu_cap {
         * this device.
         */
        IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+       /*
+        * IOMMU driver does not issue TLB maintenance during .unmap, so can
+        * usefully support the non-strict DMA flush queue.
+        */
+       IOMMU_CAP_DEFERRED_FLUSH,
 };
 
 /* These are the possible reserved region types */