/* l4/pkg/linux-26-headers/include/asm-x86/dma-mapping.h */
#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};
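
/*
 * Each low-level DMA backend (pci-nommu, swiotlb, GART, Calgary, ...)
 * provides one of these tables and installs it in dma_ops. An illustrative
 * sketch, not code from this tree, of how a trivial identity-mapping
 * backend might populate it (the "example_*" names are hypothetical; see
 * arch/x86/kernel/pci-nommu.c for a real minimal backend):
 *
 *	static dma_addr_t example_map_single(struct device *hwdev,
 *					     phys_addr_t paddr, size_t size,
 *					     int direction)
 *	{
 *		return paddr;	(no IOMMU: bus address == physical address)
 *	}
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.is_phys	= 1,
 *	};
 */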

extern struct dma_mapping_ops *dma_ops;

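/*
 * On 32-bit x86 there is a single global ops table; on 64-bit a device may
 * carry its own table in dev->archdata.dma_ops, installed by the IOMMU code
 * it sits behind.
 */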
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the historical behaviour: 32-bit x86 has no IOMMU, so a mapping
 * can never fail there and we always report success.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
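
/*
 * Illustrative use of the coherent DMA API (hypothetical driver code, not
 * part of this header):
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...	(CPU and device share the buffer without explicit syncs)
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 */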

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}
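
/*
 * Illustrative streaming-DMA usage (hypothetical driver code): map before
 * handing the buffer to the device, check for failure, unmap when the
 * transfer is done.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	...	(program the device with "handle" and run the transfer)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */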

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}
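
/*
 * Illustrative scatter-gather usage (hypothetical driver code; NENTS is a
 * made-up compile-time size). dma_map_sg() may coalesce entries, so the
 * returned count can be smaller than nents; dma_unmap_sg() must still be
 * called with the original nents.
 *
 *	struct scatterlist sgl[NENTS];
 *	int count;
 *
 *	sg_init_table(sgl, NENTS);
 *	...	(fill the entries with sg_set_buf() or sg_set_page())
 *	count = dma_map_sg(dev, sgl, NENTS, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	...	(feed sg_dma_address()/sg_dma_len() of each of the "count"
 *	...	 mapped entries to the device)
 *	dma_unmap_sg(dev, sgl, NENTS, DMA_FROM_DEVICE);
 */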

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}
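
/*
 * Illustrative sync usage for a mapping that stays alive across several
 * transfers (hypothetical driver code): ownership bounces between CPU and
 * device instead of remapping each time.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...	(the CPU may now safely read the buffer)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...	(ownership is back with the device)
 */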

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}
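
/*
 * Illustrative page mapping (hypothetical driver code); equivalent to
 * dma_map_single() for a buffer described as page + offset:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_BIDIRECTIONAL);
 */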

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) (1)

#include <asm-generic/dma-coherent.h>
#endif