return mask;
}
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size,
+ struct dma_attrs *attrs)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
while (size > 0) {
void *ptr = kmap_atomic(page);
- memset(ptr, 0, PAGE_SIZE);
+ if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+ memset(ptr, 0, PAGE_SIZE);
dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
} else {
void *ptr = page_address(page);
- memset(ptr, 0, size);
+ if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+ memset(ptr, 0, size);
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
}
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, NULL);
return page;
}
if (!page)
return NULL;
- if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
- __dma_clear_buffer(page, size);
+	/*
+	 * Skip the clear entirely if we need neither zeroing nor a CPU sync.
+	 */
+ if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
+ dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
+ __dma_clear_buffer(page, size, attrs);
if (!want_vaddr)
goto out;
if (!page)
goto error;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, NULL);
for (i = 0; i < count; i++)
pages[i] = page + i;
pages[i + j] = pages[i] + j;
}
- __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
i += 1 << order;
count -= 1 << order;
}
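
For reference, a minimal, hypothetical caller sketch (not part of this patch) of how a driver might request an unzeroed allocation once this change is in place. It assumes this tree's DMA_ATTR_SKIP_ZEROING attribute and the struct dma_attrs helpers from <linux/dma-attrs.h>; the function and variable names are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Hypothetical helper: allocate a buffer the device will fully overwrite. */
static void *alloc_unzeroed_buf(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/*
	 * The device writes every byte before the CPU reads it back, so the
	 * memset() in __dma_clear_buffer() would be wasted work.
	 */
	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);

	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}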