#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm-generic/dma-coherent.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

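/*
 * Example (illustrative sketch, not part of this header): a typical
 * streaming mapping of a hypothetical driver buffer "buf" of "len"
 * bytes.  dma_map_single()/dma_unmap_single() are the NULL-attrs
 * wrappers defined further down.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer, device reads from "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
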
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

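/*
 * Example (illustrative sketch): mapping a hypothetical scatterlist
 * "sgl" with "nents" entries.  As the comment above dma_map_sg_attrs
 * notes, 0 means failure; on success the returned count may be smaller
 * than nents (entries can be merged) and is what the device should be
 * programmed with.  Unmapping always takes the original "nents".
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device with "count" entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
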
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

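/*
 * Example (illustrative sketch): mapping a hypothetical struct page
 * "pg" directly, e.g. one handed over by the page allocator, without
 * needing a kernel virtual address.
 *
 *	dma_addr_t handle = dma_map_page(dev, pg, 0, PAGE_SIZE,
 *					 DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */
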
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

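/*
 * Example (illustrative sketch): keeping one streaming mapping alive
 * across many transfers.  The CPU may only touch the buffer between
 * the for_cpu and for_device calls; "handle" and "len" are
 * hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... buffer ownership is back with the device ...
 */
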
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

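/*
 * Example (illustrative sketch): syncing only the part of a large
 * mapped buffer that the device actually wrote, e.g. a short packet
 * header at a hypothetical "hdr_off" inside the mapping at "handle".
 *
 *	dma_sync_single_range_for_cpu(dev, handle, hdr_off, hdr_len,
 *				      DMA_FROM_DEVICE);
 *	... inspect the header ...
 *	dma_sync_single_range_for_device(dev, handle, hdr_off, hdr_len,
 *					 DMA_FROM_DEVICE);
 */
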
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

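/*
 * Example (illustrative sketch): a hypothetical driver implementing
 * its mmap file operation on top of dma_mmap_coherent(), with
 * cpu_addr/handle/size saved from an earlier dma_alloc_coherent().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */
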
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

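/*
 * Example (illustrative sketch): building an sg_table that describes a
 * coherent allocation, as dma-buf exporters commonly do.
 *
 *	struct sg_table sgt;
 *	int ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
 *
 *	if (ret < 0)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */
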
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;
	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;
	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());
	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;
	if (!ops->free)
		return;
	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

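/*
 * Example (illustrative sketch): a coherent allocation for a
 * hypothetical descriptor ring.  Both CPU and device may access the
 * memory without explicit sync calls; RING_BYTES is made up.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... point the device at "ring_dma", use "ring" from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
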
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);
#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_nonconsistent(struct device *dev, size_t size,
					  void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_nonconsistent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

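/*
 * Example (illustrative sketch): the DEFINE_DMA_ATTRS()/dma_set_attr()
 * pattern used by the wrappers above can be open-coded to request
 * other attributes, e.g. a write-combined allocation:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	cpu_addr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
 */
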
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

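/*
 * Example (illustrative sketch): a probe routine typically sets the
 * DMA mask before creating any mappings, falling back from 64 to 32
 * bits if the wider mask is not supported.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */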