#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>

extern struct bus_type pci_bus_type;
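
/*
 * dma_supported() is a stub that accepts any mask, so dma_set_mask()
 * only has to check that the device has a DMA mask to update.
 */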

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
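
/*
 * Minimal usage sketch, not part of this header: a driver limited to
 * 32-bit addressing would typically do
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * before creating any mappings.
 */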

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);
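
/*
 * Non-coherent allocations fall straight through to the coherent
 * allocator, and all memory is reported as consistent.
 */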

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
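
/*
 * Mapping a single buffer: on coherent PCI this is a pure
 * virt-to-phys translation; otherwise the CPU cache is synchronized
 * first via dma_cache_sync(). Unmapping is a no-op.
 */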

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
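
/*
 * Scatterlist mapping never fails: each entry is (optionally) cache
 * synchronized and its physical address recorded in sg->dma_address,
 * so the full nents count is always returned.
 */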

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
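
/*
 * Page mappings reuse the single-buffer path; there is no highmem
 * special-casing here, so page_address() is always usable.
 */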

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
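
/*
 * The sync helpers below mirror the mapping paths: coherent PCI needs
 * no cache maintenance at all, everything else goes through
 * dma_cache_sync() on the kernel virtual address of the buffer.
 */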

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}
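
/*
 * The _for_cpu/_for_device variants do not distinguish the direction
 * of ownership transfer; both simply perform the same sync.
 */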

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
					struct scatterlist *sg, int nelems,
					enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT, and
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
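
/* A bus address of zero is used as the mapping error marker. */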

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
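
/*
 * Per-device coherent memory: a platform can hand a chunk of
 * device-local memory to the core and have dma_alloc_coherent()
 * satisfied from it. Usage sketch (bus_addr, device_addr and size are
 * assumed, platform-specific values):
 *
 *	if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *					 size, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 */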

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif /* __ASM_SH_DMA_MAPPING_H */