/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

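/*
 * Illustrative sketch only, not part of this header's API: on a
 * non-cache-coherent core, a driver could follow the alternative the
 * comment above describes, i.e. allocate memory "normally" and do the
 * cache management by hand.  The names "buf" and "len" are
 * hypothetical.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	... CPU fills buf, then flushes it before the device reads it ...
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 */
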
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without a mask can take 32-bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
};

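/*
 * A hedged sketch of how a bus might populate this table; the
 * "example_" names are hypothetical and exist only to show the shape
 * of an implementation:
 *
 *	static void *example_alloc_coherent(struct device *dev, size_t size,
 *					    dma_addr_t *dma_handle, gfp_t flag)
 *	{
 *		return __dma_alloc_coherent(size, dma_handle, flag);
 *	}
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent	= example_alloc_coherent,
 *		.map_single	= example_map_single,
 *		...
 *	};
 */
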
/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */

	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
#ifdef CONFIG_PPC64
		return NULL;
#else
		/* Use default on 32-bit if dma_ops is not set up */
		/* TODO: Long term, we should fix drivers so that dev and
		 * archdata dma_ops are set up for all buses.
		 */
		return &dma_direct_ops;
#endif
	}

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

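/*
 * For example, platform or bus setup code might install the generic
 * direct ops on a newly discovered device ("dev" here being whatever
 * device the bus code is initialising):
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 */
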
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

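/*
 * A common driver pattern (illustrative; "pdev" stands for some PCI
 * device the driver owns) is to try a 64-bit mask first and fall back
 * to 32-bit before giving up, since dma_set_mask() returns 0 only on
 * success:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
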
/*
 * TODO: map_/unmap_single will ideally go away, to be completely
 * replaced by map/unmap_page.  Until then, we allow dma_ops to have
 * one or the other, or both by checking to see if the specific
 * function requested exists; and if not, falling back on the other set.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->map_single)
		return dma_ops->map_single(dev, cpu_addr, size, direction,
					   attrs);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->unmap_single) {
		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
		return;
	}

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->map_page)
		return dma_ops->map_page(dev, page, offset, size, direction,
					 attrs);

	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	if (dma_ops->unmap_page) {
		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
		return;
	}

	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

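/*
 * Typical streaming-DMA round trip through the wrappers above
 * (illustrative; "dev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t mapping;
 *
 *	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, mapping))
 *		return -EIO;
 *	... point the hardware at "mapping" and wait for it to finish ...
 *	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
 */
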
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

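/*
 * Scatter/gather sketch (illustrative; "program_hw_entry" is a made-up
 * driver helper).  The count returned by dma_map_sg() is what the
 * hardware should be programmed with, since entries may be coalesced;
 * dma_unmap_sg() still takes the original nents:
 *
 *	int i, count;
 *	struct scatterlist *sg;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
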
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

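/*
 * Ownership hand-off sketch for reusing one streaming mapping
 * (hypothetical names): sync back to the CPU before looking at data
 * the device wrote, then sync towards the device before the next
 * transfer.
 *
 *	dma_sync_single_for_cpu(dev, mapping, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, mapping, len, DMA_FROM_DEVICE);
 */
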
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything from the start of the mapping for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything from the start of the mapping for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */