Merge branches 'apple/dart', 'arm/mediatek', 'arm/renesas', 'arm/smmu', 'arm/tegra...
[uclinux-h8/linux.git] drivers/iommu/dma-iommu.c
index 896bea0..b42e38a 100644
@@ -98,9 +98,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
 /**
  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
  * @domain: IOMMU domain to prepare for DMA-API usage
- *
- * IOMMU drivers should normally call this from their domain_alloc
- * callback when domain->type == IOMMU_DOMAIN_DMA.
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
@@ -113,7 +110,6 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
 
        return 0;
 }
-EXPORT_SYMBOL(iommu_get_dma_cookie);
 
 /**
  * iommu_get_msi_cookie - Acquire just MSI remapping resources
@@ -151,8 +147,6 @@ EXPORT_SYMBOL(iommu_get_msi_cookie);
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
  *          iommu_get_msi_cookie()
- *
- * IOMMU drivers should normally call this from their domain_free callback.
  */
 void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
@@ -172,7 +166,6 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
        kfree(cookie);
        domain->iova_cookie = NULL;
 }
-EXPORT_SYMBOL(iommu_put_dma_cookie);
 
 /**
  * iommu_dma_get_resv_regions - Reserved region driver helper
@@ -317,6 +310,11 @@ static bool dev_is_untrusted(struct device *dev)
        return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+static bool dev_use_swiotlb(struct device *dev)
+{
+       return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
+}
+
 /* sysfs updates are serialised by the mutex of the group owning @domain */
 int iommu_dma_init_fq(struct iommu_domain *domain)
 {
@@ -510,23 +508,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
        iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
 }
 
-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
-               size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       struct iommu_domain *domain = iommu_get_dma_domain(dev);
-       phys_addr_t phys;
-
-       phys = iommu_iova_to_phys(domain, dma_addr);
-       if (WARN_ON(!phys))
-               return;
-
-       __iommu_dma_unmap(dev, dma_addr, size);
-
-       if (unlikely(is_swiotlb_buffer(dev, phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot, u64 dma_mask)
 {
@@ -553,52 +534,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
        return iova + iova_off;
 }
 
-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
-               size_t org_size, dma_addr_t dma_mask, bool coherent,
-               enum dma_data_direction dir, unsigned long attrs)
-{
-       int prot = dma_info_to_prot(dir, coherent, attrs);
-       struct iommu_domain *domain = iommu_get_dma_domain(dev);
-       struct iommu_dma_cookie *cookie = domain->iova_cookie;
-       struct iova_domain *iovad = &cookie->iovad;
-       size_t aligned_size = org_size;
-       void *padding_start;
-       size_t padding_size;
-       dma_addr_t iova;
-
-       /*
-        * If both the physical buffer start address and size are
-        * page aligned, we don't need to use a bounce page.
-        */
-       if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
-           iova_offset(iovad, phys | org_size)) {
-               aligned_size = iova_align(iovad, org_size);
-               phys = swiotlb_tbl_map_single(dev, phys, org_size,
-                                             aligned_size, dir, attrs);
-
-               if (phys == DMA_MAPPING_ERROR)
-                       return DMA_MAPPING_ERROR;
-
-               /* Cleanup the padding area. */
-               padding_start = phys_to_virt(phys);
-               padding_size = aligned_size;
-
-               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-                   (dir == DMA_TO_DEVICE ||
-                    dir == DMA_BIDIRECTIONAL)) {
-                       padding_start += org_size;
-                       padding_size -= org_size;
-               }
-
-               memset(padding_start, 0, padding_size);
-       }
-
-       iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-       if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
-               swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
-       return iova;
-}
-
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
        while (count--)
@@ -616,7 +551,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
        if (!order_mask)
                return NULL;
 
-       pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
+       pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;
 
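
Aside (illustrative only, not part of the patch): the kvzalloc() -> kvcalloc() change above swaps an open-coded count * sizeof(*pages) multiplication for the calloc-style interface, which fails the allocation outright if the product would overflow rather than silently allocating a too-small buffer. A rough userspace sketch of that behaviour follows; sketch_calloc() is a hypothetical stand-in for kvcalloc(), not a kernel API.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical userspace stand-in for kvcalloc(): reject products that
 * would wrap around instead of handing back an undersized buffer.
 */
static void *sketch_calloc(size_t n, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;		/* n * size overflows: fail the request */
	return calloc(n, size);		/* zeroed allocation, like kvcalloc()/kvzalloc() */
}

int main(void)
{
	/* An absurd element count whose product with 16 wraps size_t. */
	size_t huge = ((size_t)-1 / 8) + 1;

	if (!sketch_calloc(huge, 16))
		printf("overflowing request rejected\n");

	free(sketch_calloc(32, sizeof(void *)));	/* sane request succeeds */
	return 0;
}
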
@@ -794,7 +729,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
        phys_addr_t phys;
 
-       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+       if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
                return;
 
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -810,7 +745,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
        phys_addr_t phys;
 
-       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+       if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
                return;
 
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -828,17 +763,13 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sg;
        int i;
 
-       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
-               return;
-
-       for_each_sg(sgl, sg, nelems, i) {
-               if (!dev_is_dma_coherent(dev))
+       if (dev_use_swiotlb(dev))
+               for_each_sg(sgl, sg, nelems, i)
+                       iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+                                                     sg->length, dir);
+       else if (!dev_is_dma_coherent(dev))
+               for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
-
-               if (is_swiotlb_buffer(dev, sg_phys(sg)))
-                       swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
-                                                   sg->length, dir);
-       }
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -848,17 +779,14 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sg;
        int i;
 
-       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
-               return;
-
-       for_each_sg(sgl, sg, nelems, i) {
-               if (is_swiotlb_buffer(dev, sg_phys(sg)))
-                       swiotlb_sync_single_for_device(dev, sg_phys(sg),
-                                                      sg->length, dir);
-
-               if (!dev_is_dma_coherent(dev))
+       if (dev_use_swiotlb(dev))
+               for_each_sg(sgl, sg, nelems, i)
+                       iommu_dma_sync_single_for_device(dev,
+                                                        sg_dma_address(sg),
+                                                        sg->length, dir);
+       else if (!dev_is_dma_coherent(dev))
+               for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
-       }
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -867,22 +795,66 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
        phys_addr_t phys = page_to_phys(page) + offset;
        bool coherent = dev_is_dma_coherent(dev);
-       dma_addr_t dma_handle;
+       int prot = dma_info_to_prot(dir, coherent, attrs);
+       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+       struct iova_domain *iovad = &cookie->iovad;
+       dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+       /*
+        * If both the physical buffer start address and size are
+        * page aligned, we don't need to use a bounce page.
+        */
+       if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
+               void *padding_start;
+               size_t padding_size, aligned_size;
+
+               aligned_size = iova_align(iovad, size);
+               phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+                                             iova_mask(iovad), dir, attrs);
+
+               if (phys == DMA_MAPPING_ERROR)
+                       return DMA_MAPPING_ERROR;
 
-       dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
-                       coherent, dir, attrs);
-       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           dma_handle != DMA_MAPPING_ERROR)
+               /* Cleanup the padding area. */
+               padding_start = phys_to_virt(phys);
+               padding_size = aligned_size;
+
+               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+                   (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+                       padding_start += size;
+                       padding_size -= size;
+               }
+
+               memset(padding_start, 0, padding_size);
+       }
+
+       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(phys, size, dir);
-       return dma_handle;
+
+       iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
+       if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+       return iova;
 }
 
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-       __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
+       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+       phys_addr_t phys;
+
+       phys = iommu_iova_to_phys(domain, dma_handle);
+       if (WARN_ON(!phys))
+               return;
+
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+               arch_sync_dma_for_cpu(phys, size, dir);
+
+       __iommu_dma_unmap(dev, dma_handle, size);
+
+       if (unlikely(is_swiotlb_buffer(dev, phys)))
+               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
 /*
@@ -967,7 +939,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
        int i;
 
        for_each_sg(sg, s, nents, i)
-               __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+               iommu_dma_unmap_page(dev, sg_dma_address(s),
                                sg_dma_len(s), dir, attrs);
 }
 
@@ -978,9 +950,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
        int i;
 
        for_each_sg(sg, s, nents, i) {
-               sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
-                               s->length, dma_get_mask(dev),
-                               dev_is_dma_coherent(dev), dir, attrs);
+               sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
+                               s->offset, s->length, dir, attrs);
                if (sg_dma_address(s) == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(s) = s->length;
@@ -1016,15 +987,16 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 
        if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
                ret = iommu_deferred_attach(dev, domain);
-               goto out;
+               if (ret)
+                       goto out;
        }
 
+       if (dev_use_swiotlb(dev))
+               return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 
-       if (dev_is_untrusted(dev))
-               return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
-
        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
@@ -1097,14 +1069,14 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
        struct scatterlist *tmp;
        int i;
 
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
-       if (dev_is_untrusted(dev)) {
+       if (dev_use_swiotlb(dev)) {
                iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
                return;
        }
 
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
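
Aside (illustrative only, not part of the patch): the reworked iommu_dma_map_page() above only bounces an untrusted device's buffer when its start address or length is misaligned with respect to the IOVA granule, and it zeroes whatever padding the alignment adds so that stale swiotlb contents never become visible to the device. A self-contained userspace sketch of that arithmetic, assuming a 4 KiB granule; the sketch_* helpers mimic iova_offset()/iova_align() and are not kernel APIs.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GRANULE		4096UL			/* assumed IOVA granule (one page) */
#define GRANULE_MASK	((uint64_t)GRANULE - 1)

static uint64_t sketch_iova_offset(uint64_t v) { return v & GRANULE_MASK; }
static uint64_t sketch_iova_align(uint64_t v)  { return (v + GRANULE_MASK) & ~GRANULE_MASK; }

int main(void)
{
	uint64_t phys = 0x1000a40;		/* misaligned buffer start */
	uint64_t size = 0x180;			/* misaligned buffer length */
	static char bounce[2 * GRANULE];	/* stands in for the swiotlb slot */

	/* Aligned start and size: no bounce page needed at all. */
	if (!sketch_iova_offset(phys | size)) {
		printf("aligned buffer, mapped directly\n");
		return 0;
	}

	uint64_t aligned_size = sketch_iova_align(size);

	/*
	 * DMA_TO_DEVICE / DMA_BIDIRECTIONAL case from the patch: the payload
	 * itself is copied into the slot, so only the tail padding is wiped.
	 * Other directions clear the whole aligned region instead.
	 */
	memset(bounce + size, 0, aligned_size - size);

	printf("bounced %lu bytes, zeroed %lu bytes of padding\n",
	       (unsigned long)aligned_size, (unsigned long)(aligned_size - size));
	return 0;
}
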