swiotlb: add support for non-coherent DMA
author    Christoph Hellwig <hch@lst.de>  Fri, 19 Oct 2018 06:51:53 +0000 (08:51 +0200)
committer Christoph Hellwig <hch@lst.de>  Fri, 19 Oct 2018 06:53:05 +0000 (08:53 +0200)

Handle architectures that are not cache coherent directly in the main
swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right
places from the various dma_map/unmap/sync methods when the device is
non-coherent.
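
In sketch form, each map-style method gains a cache maintenance step for
non-coherent devices. A condensed, hypothetical helper to illustrate the
pattern (the real logic, including the bounce-buffer path, is in the diff
below):

    #include <linux/dma-direct.h>
    #include <linux/dma-noncoherent.h>

    static dma_addr_t map_page_sketch(struct device *dev, phys_addr_t phys,
                    size_t size, enum dma_data_direction dir,
                    unsigned long attrs)
    {
            dma_addr_t dev_addr = phys_to_dma(dev, phys);

            /* bounce buffering for !dma_capable() devices elided */

            /*
             * Write back (and/or invalidate) the CPU caches for the
             * mapped range before the device accesses it.
             */
            if (!dev_is_dma_coherent(dev) &&
                !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    arch_sync_dma_for_device(dev, phys, size, dir);

            return dev_addr;
    }

The unmap and sync-for-cpu paths mirror this, calling arch_sync_dma_for_cpu()
before the CPU reads the buffer again.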

Because swiotlb now uses dma_direct_alloc for the coherent allocation,
that side is already taken care of by the dma-direct code calling into
arch_dma_{alloc,free} for devices that are non-coherent.
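
For reference, the allocation-side dispatch looks roughly like the following
sketch of dma_direct_alloc() (simplified; the exact code lives in
kernel/dma/direct.c and may differ between trees):

    void *dma_direct_alloc(struct device *dev, size_t size,
                    dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
    {
            /* non-coherent devices get the arch-specific allocator */
            if (!dev_is_dma_coherent(dev))
                    return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
            return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
    }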

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 1a01b0a..ebecaf2 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -21,6 +21,7 @@
 
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-               return dev_addr;
+       if (!dma_capable(dev, dev_addr, size) ||
+           swiotlb_force == SWIOTLB_FORCE) {
+               trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+               dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+       }
+
+       if (!dev_is_dma_coherent(dev) &&
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+               arch_sync_dma_for_device(dev, phys, size, dir);
 
-       trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-       return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+       return dev_addr;
 }
 
 /*
@@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
        BUG_ON(dir == DMA_NONE);
 
+       if (!dev_is_dma_coherent(hwdev) &&
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+               arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
        if (is_swiotlb_buffer(paddr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                return;
@@ -730,15 +741,17 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
        BUG_ON(dir == DMA_NONE);
 
-       if (is_swiotlb_buffer(paddr)) {
+       if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
+               arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
+       if (is_swiotlb_buffer(paddr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-               return;
-       }
 
-       if (dir != DMA_FROM_DEVICE)
-               return;
+       if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
+               arch_sync_dma_for_device(hwdev, paddr, size, dir);
 
-       dma_mark_clean(phys_to_virt(paddr), size);
+       if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
+               dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void