/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped buses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_offset (normally via set_dma_offset()) to hold the
 * offset. By default the offset is PCI_DRAM_OFFSET.
 */
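/*
 * For example (an illustrative sketch, not code from any particular
 * platform), setup code for a bus whose devices see system memory at a
 * fixed offset might do:
 *
 *	set_dma_offset(&pdev->dev, PCI_DRAM_OFFSET);
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * get_dma_offset() below then folds that offset into every DMA address
 * handed back to the driver.
 */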
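/*
 * Return the PFN limit (exclusive) implied by the device's coherent DMA
 * mask; when SWIOTLB is in use it is further capped by the platform's
 * maximum direct DMA address.
 */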
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
	struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
	if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

	return pfn;
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

	/* Limit fits in the mask, we are good */
	if (mask >= limit)
		return 1;

#ifdef CONFIG_FSL_SOC
	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
	 * that will have to be refined if/when they support iommus
	 */
	return 1;
#endif
	/* Sorry ... */
	return 0;
#else
	return 1;
#endif
}

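/*
 * Allocate a zeroed coherent buffer straight from the linear mapping (or
 * from the non-coherent allocator on CONFIG_NOT_COHERENT_CACHE platforms)
 * and return its bus address, i.e. the physical address plus the
 * per-device offset.
 */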
void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag,
				  unsigned long attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
	u64 pfn = get_pfn_limit(dev);
	int zone;

	/*
	 * This code should be OK on other platforms, but we have drivers that
	 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
	 * whole routine needs some serious cleanup.
	 */

	zone = dma_pfn_limit_to_zone(pfn);
	if (zone < 0) {
		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
			__func__, pfn);
		return NULL;
	}

	switch (zone) {
	case ZONE_DMA:
		flag |= GFP_DMA;
		break;
#ifdef CONFIG_ZONE_DMA32
	case ZONE_DMA32:
		flag |= GFP_DMA32;
		break;
#endif
	}
#endif /* CONFIG_FSL_SOC */

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void __dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
	struct iommu_table *iommu;

	/* The coherent mask may be smaller than the real mask, check if
	 * we can really use the direct ops
	 */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_alloc_coherent(dev, size, dma_handle,
						   flag, attrs);

	/* OK, we can't. Do we have an iommu? If not, fail. */
	iommu = get_iommu_table_base(dev);
	if (!iommu)
		return NULL;

	/* Try to use the iommu */
	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
				    dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
	struct iommu_table *iommu;

	/* See comments in dma_direct_alloc_coherent() */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
						  attrs);
	/* Maybe we used an iommu ... */
	iommu = get_iommu_table_base(dev);

	/* If we hit this we should never have allocated in the first
	 * place, so how did we end up freeing?
	 */
	if (WARN_ON(!iommu))
		return;
	iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
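/*
 * Map an already-allocated coherent buffer into userspace. On
 * cache-incoherent platforms the mapping must be uncached, and the PFN
 * has to be looked up in the coherent pool rather than derived from the
 * kernel virtual address.
 */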
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     unsigned long attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				unsigned long attrs)
{
}

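/*
 * The required mask is the smallest all-ones mask that covers the highest
 * bus address the device may see: round the end of DRAM (plus the device
 * offset) up to a power of two, minus one. For example, with end =
 * 0x120000000 (4.5GB), fls64() returns 33, so mask becomes (1ULL << 32)
 * and the final result is 0x1ffffffff.
 */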
static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     unsigned long attrs)
{
	BUG_ON(dir == DMA_NONE);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_page(page, offset, size, dir);

	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
}

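/*
 * On cache-incoherent platforms the streaming API also has to flush or
 * invalidate the CPU caches around each transfer; __dma_sync() and
 * __dma_sync_page() do the right thing based on the transfer direction.
 */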
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

const struct dma_map_ops dma_direct_ops = {
	.alloc				= dma_direct_alloc_coherent,
	.free				= dma_direct_free_coherent,
	.mmap				= dma_direct_mmap_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
	.get_required_mask		= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_direct_sync_single,
	.sync_single_for_device		= dma_direct_sync_single,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask)) {
		/*
		 * We need to special case the direct DMA ops which can
		 * support a fallback for coherent allocations. There
		 * is no dma_op->set_coherent_mask() so we have to do
		 * things the hard way:
		 */
		if (get_dma_ops(dev) != &dma_direct_ops ||
		    get_iommu_table_base(dev) == NULL ||
		    !dma_iommu_dma_supported(dev, mask))
			return -EIO;
	}
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
		if (phb->controller_ops.dma_set_mask)
			return phb->controller_ops.dma_set_mask(pdev, dma_mask);
	}

	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

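/*
 * Typical driver-side usage (a sketch, with error handling elided):
 * negotiate the streaming and coherent masks once at probe time, before
 * any mapping is made:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
 *	    dma_set_coherent_mask(dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask(dev, DMA_BIT_MASK(32));
 */
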
u64 __dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
		if (phb->controller_ops.dma_get_required_mask)
			return phb->controller_ops.dma_get_required_mask(pdev);
	}

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);