OSDN Git Service

arm: dma-mapping: map_page map to nearest page
authorLiam Mark <lmark@codeaurora.org>
Fri, 27 Jan 2017 00:46:14 +0000 (16:46 -0800)
committerLiam Mark <lmark@codeaurora.org>
Mon, 6 Feb 2017 22:03:48 +0000 (14:03 -0800)
Since the page offset can be greater than the size of a page, fix
arm_coherent_iommu_map_page so that it maps to the nearest page
boundary.

This both prevents unnecessarily mapping memory we don't need to
map and fixes a bug where the unmap wasn't unmapping this extra
memory.

Change-Id: Iaa69aff7505ee75d1f2e69bb0cda814bc6211bd3
Signed-off-by: Liam Mark <lmark@codeaurora.org>
arch/arm/mm/dma-mapping.c
arch/arm64/mm/dma-mapping.c

index 48836eb..330061d 100644 (file)
@@ -1875,7 +1875,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        dma_addr_t dma_addr;
-       int ret, prot, len = PAGE_ALIGN(size + offset);
+       int ret, prot, len, start_offset, map_offset;
+
+       map_offset = offset & ~PAGE_MASK;
+       start_offset = offset & PAGE_MASK;
+       len = PAGE_ALIGN(map_offset + size);
 
        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_ERROR_CODE)
@@ -1883,11 +1887,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 
        prot = __dma_direction_to_prot(dir);
 
-       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+                       start_offset, len, prot);
        if (ret < 0)
                goto fail;
 
-       return dma_addr + offset;
+       return dma_addr + map_offset;
 fail:
        __free_iova(mapping, dma_addr, len);
        return DMA_ERROR_CODE;
index 1ad7995..2d3f1ab 100644 (file)
@@ -1756,7 +1756,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
        dma_addr_t dma_addr;
-       int ret, prot, len = PAGE_ALIGN(size + offset);
+       int ret, prot, len, start_offset, map_offset;
+
+       map_offset = offset & ~PAGE_MASK;
+       start_offset = offset & PAGE_MASK;
+       len = PAGE_ALIGN(map_offset + size);
 
        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_ERROR_CODE)
@@ -1766,12 +1770,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
        prot = __get_iommu_pgprot(attrs, prot,
                                  is_dma_coherent(dev, attrs));
 
-       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-                       prot);
+       ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+                       start_offset, len, prot);
        if (ret < 0)
                goto fail;
 
-       return dma_addr + offset;
+       return dma_addr + map_offset;
 fail:
        __free_iova(mapping, dma_addr, len);
        return DMA_ERROR_CODE;