/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
	tasklet_disable(&chan->cleanup_task);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
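
/*
 * Note (illustrative, not taken from the original sources): the XFERCAP
 * register encodes the per-descriptor transfer limit as a power of two,
 * with a scale of zero treated as "no practical limit" (hence the -1
 * above).  ioat1_dma_prep_memcpy() below uses ioat->xfercap to split a
 * large copy into a chain of descriptors, each moving at most that many
 * bytes.
 */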

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
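
/*
 * Illustrative only (not from the original sources): a dmaengine client
 * drives the routine above through the generic operation pointers, much
 * like ioat_dma_self_test() further down in this file.  Roughly, and
 * assuming the buffers are already DMA-mapped:
 *
 *	tx = dma->device_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	tx->callback = done_fn;			(optional)
 *	cookie = tx->tx_submit(tx);
 *	dma->device_issue_pending(chan);
 *	...
 *	dma->device_tx_status(chan, cookie, NULL);
 *
 * Copies larger than ioat->xfercap are transparently split into a chain of
 * hardware descriptors; only the last descriptor carries the cookie and the
 * completion interrupt.
 */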

static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);

	ioat1_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
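
/*
 * Note (illustrative, not taken from the original sources): completion
 * tracking relies on the writeback area allocated in
 * ioat1_dma_alloc_chan_resources().  The channel writes the physical
 * address of the last completed descriptor (plus status bits) into
 * *chan->completion; the preamble above compares that address with
 * chan->last_completion to decide whether __cleanup() has new work, and
 * re-arms the completion timer when it does.
 */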

static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		 __func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->common.completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;

	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
		return DMA_SUCCESS;

	device->cleanup_fn((unsigned long) c);

	return ioat_tx_status(c, cookie, txstate);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
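
/*
 * Note (illustrative, not taken from the original sources): the NULL
 * descriptor written above gives a freshly initialized channel a valid
 * chain head.  Its physical address is programmed as the chain address and
 * the channel is started on it, so later submissions only need to link
 * their descriptors onto used_desc and ring the APPEND doorbell.
 */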

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}
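
/*
 * Note (illustrative, not taken from the original sources): the labels
 * above form a fallback ladder.  The default "msix" path requests one
 * vector per channel (ioat_dma_do_interrupt_msix); if MSI-X cannot supply
 * that many vectors the code degrades to a single MSI-X vector, then to
 * plain MSI, and finally to a shared legacy INTx line, all handled by
 * ioat_dma_do_interrupt(), which inspects ATTNSTATUS to find the
 * signalling channel(s).
 */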

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask)  ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");

}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}