/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
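
/*
 * A minimal client usage sketch (hedged: it simply mirrors what
 * ioat_dma_self_test() below does, using the descriptor-callback dmaengine
 * API of this kernel generation; "chan", "src", "dst" and "len" are assumed
 * to be supplied by the client, with src/dst already DMA-mapped):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *	enum dma_status status;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, len, 0);
 *	tx->tx_set_src(src, tx, 0);
 *	tx->tx_set_dest(dst, tx, 0);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *	status = chan->device->device_is_tx_complete(chan, cookie, NULL, NULL);
 */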

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

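/**
 * ioat_lookup_chan_by_index - map a channel index back to its channel
 * @device: the device owning the channels
 * @index: channel number, as used for MSI-X vectors and ATTNSTATUS bits
 */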
static struct ioat_dma_chan *ioat_lookup_chan_by_index(struct ioatdma_device *device,
						       int index)
{
	return device->idx[index];
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}

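/**
 * ioat_set_src - set the source address on each hw descriptor in a chain
 * @addr: DMA-mapped source address
 * @tx: descriptor whose tx_list is walked
 * @index: unused
 *
 * A copy larger than the channel's xfercap is split across several hw
 * descriptors, so the address is advanced by xfercap for each one.
 */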
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

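/**
 * ioat_set_dest - set the destination address on each hw descriptor in a chain
 * @addr: DMA-mapped destination address
 * @tx: descriptor whose tx_list is walked
 * @index: unused
 *
 * As with ioat_set_src(), the address is advanced by xfercap for each hw
 * descriptor in the chain.
 */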
static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

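/**
 * ioat_tx_submit - assign a cookie and splice the descriptors onto used_desc
 * @tx: last descriptor of the group to submit
 *
 * The new chain is linked into the NextDescriptor field of the previous
 * tail descriptor; the APPEND doorbell is only rung once at least four
 * descriptors are pending, presumably to batch up MMIO writes
 * (ioat_dma_memcpy_issue_pending() flushes any remainder).
 */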
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

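/**
 * ioat_dma_alloc_descriptor - allocate a hw/sw descriptor pair
 * @ioat_chan: the channel the descriptor will belong to
 * @flags: GFP flags (GFP_KERNEL at setup time, GFP_ATOMIC on the I/O path)
 *
 * Pairs a hardware descriptor from the DMA-coherent pci_pool with a
 * kzalloc'd software tracking structure.
 */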
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"ioatdma: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32-bit writes to MMIO since 1 64-bit write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is OK since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
}

/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
					struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new = NULL;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		/* will this ever happen? */
		/* TODO add upper limit on these */
		BUG_ON(!new);
	}

	prefetch(new->hw);
	return new;
}

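/**
 * ioat_dma_prep_memcpy - build a descriptor chain for a @len byte copy
 * @chan: DMA channel handle
 * @len: transfer length
 * @int_en: unused in this implementation
 *
 * Lengths beyond the channel's xfercap get split across several hardware
 * descriptors; only the last one carries the cookie and requests a
 * completion status write.
 */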
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		new = ioat_dma_get_next_descriptor(ioat_chan);
		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push appended descriptors that the
 *	hardware may not have noticed yet
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

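/**
 * ioat_dma_cleanup_tasklet - bottom half for the channel interrupt
 * @data: the channel, cast to an unsigned long
 *
 * Reaps completed descriptors, then writes IOAT_CHANCTRL_INT_DISABLE to
 * CHANCTRL, which appears intended to re-arm the channel interrupt.
 */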
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

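/**
 * ioat_dma_memcpy_cleanup - reap descriptors the hardware has completed
 * @ioat_chan: channel to clean up
 *
 * Reads the completion writeback area to find the last descriptor the
 * hardware finished, unmaps the buffers of each completed request, and
 * moves acked descriptors back to the free list.
 */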
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock(&ioat_chan->cleanup_lock);
		return;
	}

	cookie = 0;
	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
					pci_unmap_addr(desc, dst),
					pci_unmap_len(desc, len),
					PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
					pci_unmap_addr(desc, src),
					pci_unmap_len(desc, len),
					PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean up
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock(&ioat_chan->cleanup_lock);
}

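/**
 * ioat_dma_dependency_added - run cleanup if the channel has nothing pending
 * @chan: DMA channel handle
 */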
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

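/**
 * ioat_dma_start_null_desc - get a channel running via a NULL descriptor
 * @ioat_chan: channel to start
 *
 * Posts a no-op descriptor as the first entry on used_desc, points the
 * hardware's chain address registers at it, and issues a START; real
 * transfers are appended behind it later.
 */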
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}

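/**
 * ioat_dma_probe - set up a device discovered by the PCI layer
 * @pdev: the PCI device
 * @iobase: already-mapped MMIO register space
 *
 * Creates the descriptor and completion pools, enumerates the channels,
 * wires up interrupts, runs a self test and registers with the dmaengine
 * core; returns NULL (and unmaps @iobase) on failure.
 */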
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	dev_err(&device->pdev->dev,
		"ioatdma: Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x\n",
		device->common.chancnt, device->version);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(iobase);
	/* don't touch *device here; it is NULL or already freed */
	dev_err(&pdev->dev,
		"ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}

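/**
 * ioat_dma_remove - unregister a device and free its channel resources
 * @device: the device to remove
 */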
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	dma_async_device_unregister(&device->common);

	ioat_dma_remove_interrupts(device);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}