
sh: use the generic dma coherent remap allocator
author     Christoph Hellwig <hch@lst.de>
           Tue, 14 Jul 2020 12:18:56 +0000 (14:18 +0200)
committer  Rich Felker <dalias@libc.org>
           Sat, 15 Aug 2020 02:05:18 +0000 (22:05 -0400)
This switches to using common code for the DMA allocations, including
potential use of the CMA allocator if configured.

Switching to the generic code enables DMA allocations from atomic
context, which is required by the DMA API documentation, and also
adds various other minor features that drivers have started relying
on.  It also makes sure we have one tested code base for all
architectures that require uncached pte bits for coherent DMA
allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Rich Felker <dalias@libc.org>
arch/sh/Kconfig
arch/sh/kernel/dma-coherent.c
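
With DMA_DIRECT_REMAP selected, dma_alloc_coherent() can be called from
atomic context on sh: the generic allocator serves GFP_ATOMIC requests
from a pre-remapped atomic pool instead of calling ioremap(), which may
sleep.  A minimal driver-side sketch (the helper name and the one-page
buffer size are illustrative, not part of this commit):

#include <linux/dma-mapping.h>

/* Illustrative helper: allocate one page of coherent DMA memory from
 * atomic context.  The old sh-specific allocator could not honor this
 * safely because ioremap() may sleep; the generic remap allocator
 * satisfies GFP_ATOMIC requests from a pre-populated atomic pool. */
static void *alloc_coherent_atomic(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_ATOMIC);
}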

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 7082a4a..c315cc3 100644
@@ -137,7 +137,9 @@ config DMA_COHERENT
 
 config DMA_NONCOHERENT
        def_bool !NO_DMA && !DMA_COHERENT
+       select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select DMA_DIRECT_REMAP
 
 config PGTABLE_LEVELS
        default 3 if X2TLB
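
ARCH_HAS_DMA_PREP_COHERENT tells the common code that the architecture
supplies arch_dma_prep_coherent() to flush caches before a buffer is
handed out uncached, and DMA_DIRECT_REMAP makes the common code create
the uncached kernel mapping itself.  A rough sketch of that generic
allocation path (simplified; the names follow the kernel/dma/ common
code, but this is not the verbatim upstream implementation):

#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>

/* Simplified outline of the generic remap allocation path; a sketch,
 * not the exact upstream code. */
static void *remap_alloc_sketch(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	/* the common code may pull pages from CMA if configured */
	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		return NULL;

	/* write back/invalidate dirty cache lines before the buffer is
	 * used through an uncached mapping; this is where the new sh
	 * arch_dma_prep_coherent() hook runs */
	arch_dma_prep_coherent(page, size);

	*dma_handle = phys_to_dma(dev, page_to_phys(page));

	/* map the pages uncached in kernel space */
	return dma_common_contiguous_remap(page, size,
			pgprot_dmacoherent(PAGE_KERNEL),
			__builtin_return_address(0));
}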
diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c
index d481169..cd46a98 100644
@@ -3,60 +3,13 @@
  * Copyright (C) 2004 - 2007  Paul Mundt
  */
 #include <linux/mm.h>
-#include <linux/init.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/addrspace.h>
 
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-       void *ret, *ret_nocache;
-       int order = get_order(size);
-
-       gfp |= __GFP_ZERO;
-
-       ret = (void *)__get_free_pages(gfp, order);
-       if (!ret)
-               return NULL;
-
-       /*
-        * Pages from the page allocator may have data present in
-        * cache. So flush the cache before using uncached memory.
-        */
-       arch_sync_dma_for_device(virt_to_phys(ret), size,
-                       DMA_BIDIRECTIONAL);
-
-       ret_nocache = (void __force *)ioremap(virt_to_phys(ret), size);
-       if (!ret_nocache) {
-               free_pages((unsigned long)ret, order);
-               return NULL;
-       }
-
-       split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
-
-       *dma_handle = virt_to_phys(ret);
-       if (!WARN_ON(!dev))
-               *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
-
-       return ret_nocache;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, unsigned long attrs)
-{
-       int order = get_order(size);
-       unsigned long pfn = (dma_handle >> PAGE_SHIFT);
-       int k;
-
-       if (!WARN_ON(!dev))
-               pfn += dev->dma_pfn_offset;
-
-       for (k = 0; k < (1 << order); k++)
-               __free_pages(pfn_to_page(pfn + k), 0);
-
-       iounmap(vaddr);
+       __flush_purge_region(page_address(page), size);
 }
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,