/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped buses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
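
/*
 * Illustrative sketch (not part of the original file): with a non-zero
 * per-device offset, the bus address a device sees is simply the CPU
 * physical address plus that offset:
 *
 *	dma_addr_t bus_addr = __pa(cpu_addr) + get_dma_offset(dev);
 *
 * Platform code that needs an offset would typically install it at
 * setup time, e.g. set_dma_offset(dev, 0x80000000ul) for a bus whose
 * view of RAM starts at 2GB (the value here is hypothetical).
 */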

static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
	struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
	if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

	return pfn;
}
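
/*
 * Worked example (illustrative): with coherent_dma_mask = DMA_BIT_MASK(31)
 * and PAGE_SHIFT = 12, the limit above is (0x7fffffff >> 12) + 1 = 0x80000,
 * i.e. the first page frame number the device cannot address (2GB / 4K).
 */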

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

	/* Limit fits in the mask, we are good */
	if (mask >= limit)
		return 1;

#ifdef CONFIG_FSL_SOC
	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32; this
	 * will have to be refined if/when they support IOMMUs.
	 */
	return 1;
#endif
	/* Sorry ... */
	return 0;
#else
	return 1;
#endif
}
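
/*
 * Worked example (illustrative): on a ppc64 machine with 8GB of RAM and
 * a zero DMA offset, limit = 0x1ffffffff. A device advertising a 32-bit
 * mask (0xffffffff) fails the check above and has to fall back to an
 * iommu, while a device with a 64-bit mask can use the direct ops.
 */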

void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag,
				  unsigned long attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
	u64 pfn = get_pfn_limit(dev);
	int zone;

	/*
	 * This code should be OK on other platforms, but we have drivers that
	 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
	 * whole routine needs some serious cleanup.
	 */

	zone = dma_pfn_limit_to_zone(pfn);
	if (zone < 0) {
		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
			__func__, pfn);
		return NULL;
	}

	switch (zone) {
	case ZONE_DMA:
		flag |= GFP_DMA;
		break;
#ifdef CONFIG_ZONE_DMA32
	case ZONE_DMA32:
		flag |= GFP_DMA32;
		break;
#endif
	}
#endif /* CONFIG_FSL_SOC */

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}
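
/*
 * Usage sketch (illustrative): drivers never call the helper above
 * directly, they go through the generic DMA API, e.g.:
 *
 *	void *cpu;
 *	dma_addr_t bus;
 *
 *	cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu, bus);
 *
 * which lands in dma_direct_alloc_coherent() below for a device that
 * uses dma_direct_ops.
 */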

void __dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
	struct iommu_table *iommu;

	/* The coherent mask may be smaller than the real mask; check
	 * whether we can really use the direct ops.
	 */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_alloc_coherent(dev, size, dma_handle,
						   flag, attrs);

	/* OK, we can't. Do we have an iommu? If not, fail. */
	iommu = get_iommu_table_base(dev);
	if (!iommu)
		return NULL;

	/* Try to use the iommu */
	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
				    dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
	struct iommu_table *iommu;

	/* See comments in dma_direct_alloc_coherent() */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
						  attrs);
	/* Maybe we used an iommu ... */
	iommu = get_iommu_table_base(dev);

	/* If we hit this, we should never have allocated in the first
	 * place, so how come we are freeing?
	 */
	if (WARN_ON(!iommu))
		return;
	iommu_free_coherent(iommu, size, vaddr, dma_handle);
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     unsigned long attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
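
/*
 * Usage sketch (illustrative): a driver exporting a coherent buffer to
 * userspace would normally reach this through the generic wrapper from
 * its fops->mmap handler (foo and its fields are hypothetical names):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */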

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
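
/*
 * Usage sketch (illustrative): a driver maps a scatterlist through the
 * generic API and walks the result (program_hw() is a stand-in for
 * device-specific code):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, table, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(table, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, table, nents, DMA_TO_DEVICE);
 */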

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				unsigned long attrs)
{
}

static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
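
/*
 * Worked example (illustrative): with 4GB of RAM and a zero offset,
 * end = 0x100000000 and fls64(end) = 33, so mask starts as 1ULL << 32
 * and the final result is 0x1ffffffff, i.e. an all-ones 33-bit mask
 * wide enough to cover end.
 */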

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     unsigned long attrs)
{
	BUG_ON(dir == DMA_NONE);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_page(page, offset, size, dir);

	return page_to_phys(page) + offset + get_dma_offset(dev);
}
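
/*
 * Usage sketch (illustrative): streaming single-page mappings reach the
 * function above through the generic wrappers:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 *	...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_TO_DEVICE);
 */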

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

const struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.get_required_mask = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu = dma_direct_sync_single,
	.sync_single_for_device = dma_direct_sync_single,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
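
/*
 * Wiring sketch (illustrative): platform code selects these ops per
 * device, typically at bus setup time, e.g.:
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 *	set_dma_offset(dev, PCI_DRAM_OFFSET);
 */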

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask)) {
		/*
		 * We need to special case the direct DMA ops which can
		 * support a fallback for coherent allocations. There
		 * is no dma_op->set_coherent_mask() so we have to do
		 * things the hard way:
		 */
		if (get_dma_ops(dev) != &dma_direct_ops ||
		    get_iommu_table_base(dev) == NULL ||
		    !dma_iommu_dma_supported(dev, mask))
			return -EIO;
	}
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
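
/*
 * Usage sketch (illustrative): a driver that can only do 32-bit coherent
 * DMA would call:
 *
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */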

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
		if (phb->controller_ops.dma_set_mask)
			return phb->controller_ops.dma_set_mask(pdev, dma_mask);
	}

	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
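
/*
 * Usage sketch (illustrative): the common driver idiom tries 64-bit DMA
 * first and falls back to 32-bit, setting both masks together:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */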
335
Gavin Shanfe7e85c2014-09-30 12:39:10 +1000336u64 __dma_get_required_mask(struct device *dev)
Milton Miller6a5c7be2011-06-24 09:05:22 +0000337{
Bart Van Assche52997092017-01-20 13:04:01 -0800338 const struct dma_map_ops *dma_ops = get_dma_ops(dev);
Milton Miller6a5c7be2011-06-24 09:05:22 +0000339
Milton Miller6a5c7be2011-06-24 09:05:22 +0000340 if (unlikely(dma_ops == NULL))
341 return 0;
342
Milton Millerd24f9c62011-06-24 09:05:24 +0000343 if (dma_ops->get_required_mask)
344 return dma_ops->get_required_mask(dev);
Milton Miller6a5c7be2011-06-24 09:05:22 +0000345
Milton Millerd24f9c62011-06-24 09:05:24 +0000346 return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
Milton Miller6a5c7be2011-06-24 09:05:22 +0000347}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
		if (phb->controller_ops.dma_get_required_mask)
			return phb->controller_ops.dma_get_required_mask(pdev);
	}

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
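
/*
 * Usage sketch (illustrative): a driver can size its descriptor format
 * from the platform's answer (use_64bit_descriptors() is a stand-in for
 * driver-specific code):
 *
 *	if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32))
 *		use_64bit_descriptors();
 */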

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);