include/asm-generic/dma-mapping-common.h

#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

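/*
 * Map a contiguous lowmem kernel buffer for streaming DMA.  The virtual
 * address is converted to a page + offset pair and handed to the
 * architecture's dma_map_ops.  kmemcheck is told the buffer is
 * initialized because the device may read it; debug_dma_map_page() is a
 * no-op unless CONFIG_DMA_API_DEBUG is enabled.
 */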
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                          size_t size,
                          enum dma_data_direction dir,
                          struct dma_attrs *attrs)
{
    struct dma_map_ops *ops = get_dma_ops(dev);
    dma_addr_t addr;

    kmemcheck_mark_initialized(ptr, size);
    BUG_ON(!valid_dma_direction(dir));
    addr = ops->map_page(dev, virt_to_page(ptr),
                 (unsigned long)ptr & ~PAGE_MASK, size,
                 dir, attrs);
    debug_dma_map_page(dev, virt_to_page(ptr),
               (unsigned long)ptr & ~PAGE_MASK, size,
               dir, addr, true);
    return addr;
}

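/*
 * Tear down a mapping created by dma_map_single_attrs().  unmap_page is
 * optional in struct dma_map_ops, so it is only called when the
 * architecture provides it.
 */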
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                      size_t size,
                      enum dma_data_direction dir,
                      struct dma_attrs *attrs)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (ops->unmap_page)
        ops->unmap_page(dev, addr, size, dir, attrs);
    debug_dma_unmap_page(dev, addr, size, dir, true);
}

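/*
 * Map a scatter/gather list.  Returns the number of DMA segments
 * actually mapped, which an IOMMU may make smaller than nents by
 * coalescing entries, or 0 on failure; callers must use the returned
 * count when programming the device.
 */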
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                   int nents, enum dma_data_direction dir,
                   struct dma_attrs *attrs)
{
    struct dma_map_ops *ops = get_dma_ops(dev);
    int i, ents;
    struct scatterlist *s;

    for_each_sg(sg, s, nents, i)
        kmemcheck_mark_initialized(sg_virt(s), s->length);
    BUG_ON(!valid_dma_direction(dir));
    ents = ops->map_sg(dev, sg, nents, dir, attrs);
    debug_dma_map_sg(dev, sg, nents, ents, dir);

    return ents;
}

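/*
 * Unmap a scatter/gather mapping.  nents must be the value originally
 * passed to dma_map_sg_attrs(), not the count it returned.
 */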
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
                      struct dma_attrs *attrs)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    debug_dma_unmap_sg(dev, sg, nents, dir);
    if (ops->unmap_sg)
        ops->unmap_sg(dev, sg, nents, dir, attrs);
}

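/*
 * Page + offset variant of dma_map_single_attrs().  There is no attrs
 * form; NULL attributes are passed straight to the arch ops.
 */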
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                      size_t offset, size_t size,
                      enum dma_data_direction dir)
{
    struct dma_map_ops *ops = get_dma_ops(dev);
    dma_addr_t addr;

    kmemcheck_mark_initialized(page_address(page) + offset, size);
    BUG_ON(!valid_dma_direction(dir));
    addr = ops->map_page(dev, page, offset, size, dir, NULL);
    debug_dma_map_page(dev, page, offset, size, dir, addr, false);

    return addr;
}

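/*
 * Tear down a mapping created by dma_map_page().
 */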
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                  size_t size, enum dma_data_direction dir)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (ops->unmap_page)
        ops->unmap_page(dev, addr, size, dir, NULL);
    debug_dma_unmap_page(dev, addr, size, dir, false);
}

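/*
 * Transfer ownership of a streaming mapping back to the CPU so it can
 * safely read the buffer, e.g. after the device has written into it.
 * On cache-coherent architectures the sync hooks may be absent, hence
 * the NULL checks.
 */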
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                       size_t size,
                       enum dma_data_direction dir)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (ops->sync_single_for_cpu)
        ops->sync_single_for_cpu(dev, addr, size, dir);
    debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

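/*
 * Hand ownership back to the device before it accesses the buffer
 * again, typically after the CPU has modified it.
 */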
static inline void dma_sync_single_for_device(struct device *dev,
                          dma_addr_t addr, size_t size,
                          enum dma_data_direction dir)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (ops->sync_single_for_device)
        ops->sync_single_for_device(dev, addr, size, dir);
    debug_dma_sync_single_for_device(dev, addr, size, dir);
}

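/*
 * Sync only a sub-range of a mapping: the range starts 'offset' bytes
 * into the region that begins at the DMA address 'addr'.
 */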
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                         dma_addr_t addr,
                         unsigned long offset,
                         size_t size,
                         enum dma_data_direction dir)
{
    dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

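/*
 * Device-direction counterpart of dma_sync_single_range_for_cpu().
 */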
static inline void dma_sync_single_range_for_device(struct device *dev,
                            dma_addr_t addr,
                            unsigned long offset,
                            size_t size,
                            enum dma_data_direction dir)
{
    dma_sync_single_for_device(dev, addr + offset, size, dir);
}

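/*
 * Scatter/gather counterparts of the single-buffer sync calls; nelems
 * is the number of entries originally passed to dma_map_sg().
 */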
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
            int nelems, enum dma_data_direction dir)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (ops->sync_sg_for_cpu)
        ops->sync_sg_for_cpu(dev, sg, nelems, dir);
    debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

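/*
 * Device-direction counterpart of dma_sync_sg_for_cpu().
 */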
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
               int nelems, enum dma_data_direction dir)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(dir));
    if (ops->sync_sg_for_device)
        ops->sync_sg_for_device(dev, sg, nelems, dir);
    debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

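/*
 * The plain map/unmap API is simply the attrs API with NULL attributes.
 */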
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif
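A minimal usage sketch (not part of the header) showing how a driver built on these helpers might receive data from a device. The function name, buffer, and DMA_FROM_DEVICE direction are illustrative assumptions; error checking uses dma_mapping_error(), which the architecture supplies alongside these ops.

/* Hypothetical driver fragment; names are illustrative. */
#include <linux/dma-mapping.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
    dma_addr_t handle;

    /* dma_map_single() expands to dma_map_single_attrs(..., NULL) */
    handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, handle))
        return -ENOMEM;

    /* ... point the device at 'handle' and wait for completion ... */

    /* give the buffer back to the CPU before reading it */
    dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
    /* ... consume buf ... */

    dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
    return 0;
}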
