/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = timer_fn;
	chan->timer.data = ioat;
	tasklet_init(&chan->cleanup_task, tasklet, ioat);
	tasklet_disable(&chan->cleanup_task);
}

static void ioat1_timer_event(unsigned long data);

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_timer_event,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat1_dma_memcpy_issue_pending - push pending appended descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

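/*
 * ioat1_tx_submit - assign a cookie and append a prepared chain to the hw ring
 *
 * Links the chain built by ->prep onto the tail of used_desc (after a wmb()
 * so the hw sees consistent descriptors), arms the completion timer, and
 * rings the append doorbell once ioat_pending_level descriptors are pending.
 */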
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

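/*
 * ioat1_dma_prep_memcpy - build a sw/hw descriptor chain for a memcpy
 *
 * The copy is split into xfercap-sized pieces; only the last descriptor
 * carries the interrupt-enable and completion-write bits.  The chain is
 * handed back unsubmitted so ioat1_tx_submit() can splice it in later.
 */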
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

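/* interrupt bottom half: reap completed descriptors, then rewrite CHANCTRL */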
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

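/*
 * ioat_dma_unmap - undo the source/destination mappings of a completed copy,
 * honoring the client's DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags
 */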
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

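/*
 * ioat_get_current_completion - read the channel's completion writeback area
 * and return the bus address of the last descriptor the hardware finished;
 * a halted channel is reported but not yet recovered here.
 */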
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

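/*
 * ioat_cleanup_preamble - check for hardware progress; returns false when
 * nothing has completed since last_completion, otherwise restarts the
 * completion timer and clears the ack flag.
 */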
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

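/*
 * __cleanup - walk used_desc up to @phys_complete, unmapping buffers and
 * running client callbacks; the last completed descriptor is left on the
 * list so new work can be appended to it.  Called with desc_lock held.
 */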
static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		 __func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

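/*
 * ioat1_timer_event - channel watchdog: finish a pending reset by restarting
 * the descriptor chain, or clean up stalled completions and escalate to a
 * channel reset if no progress has been made since the last acknowledgment.
 */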
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

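/* poll for completion status; run cleanup and re-check if not yet done */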
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

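/*
 * ioat1_dma_start_null_desc - seed the channel with a no-op descriptor so
 * the hardware has a valid chain address to append to.
 */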
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx)");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

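/*
 * ioat_probe - common device bring-up: create the descriptor and completion
 * pools, enumerate channels, hook up interrupts, and run the self-test.
 */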
int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask)  ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");

}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

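/* wire up the ioat v1 channel operations and register with dmaengine */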
int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}