/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, device_to_mask(dev), flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
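
/*
 * Illustrative example (a sketch, not part of the original file): drivers do
 * not call the helpers above directly.  They use the generic DMA API from
 * <linux/dma-mapping.h>, which dispatches to dma_iommu_ops once those ops
 * are installed for the device.  With the hypothetical names "my_dev" and
 * "MY_RING_BYTES", a typical coherent allocation would look like:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(my_dev, MY_RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(my_dev, MY_RING_BYTES, ring, ring_dma);
 */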

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}
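
/*
 * Illustrative example (a sketch, not part of the original file): streaming
 * mappings reach the callbacks above through dma_map_page()/dma_unmap_page()
 * from <linux/dma-mapping.h>.  Assuming a hypothetical device "my_dev"
 * sending "len" bytes from "page" at "offset" to its hardware:
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_page(my_dev, page, offset, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, busaddr))
 *		return -EIO;
 *	... program the device with busaddr and wait for the DMA to finish ...
 *	dma_unmap_page(my_dev, busaddr, len, DMA_TO_DEVICE);
 */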

static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO
		       "Warning: IOMMU offset too big for device mask\n");
		if (tbl)
			printk(KERN_INFO
			       "mask: 0x%08llx, table offset: 0x%08lx\n",
			       mask, tbl->it_offset);
		else
			printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
			       mask);
		return 0;
	} else
		return 1;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
	.map_page	= dma_iommu_map_page,
	.unmap_page	= dma_iommu_unmap_page,
};
EXPORT_SYMBOL(dma_iommu_ops);
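
/*
 * Illustrative example (a sketch, not part of the original file): platform or
 * bus code that owns an iommu_table typically points each device at its table
 * and installs these ops.  The function name "my_bus_setup_dma" is
 * hypothetical; set_iommu_table_base() and set_dma_ops() are the powerpc
 * helpers from asm/dma-mapping.h.
 *
 *	static void my_bus_setup_dma(struct device *dev, struct iommu_table *tbl)
 *	{
 *		set_iommu_table_base(dev, tbl);
 *		set_dma_ops(dev, &dma_iommu_ops);
 *	}
 *
 * After that, generic calls such as dma_alloc_coherent() and dma_map_page()
 * on the device are routed to the dma_iommu_* callbacks above.
 */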