/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = timer_fn;
	chan->timer.data = ioat;
	tasklet_init(&chan->cleanup_task, tasklet, ioat);
	tasklet_disable(&chan->cleanup_task);
}

static void ioat1_timer_event(unsigned long data);

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_timer_event,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

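/**
 * ioat1_tx_submit - attach a prepared descriptor chain to the hardware
 * @tx: descriptor carrying the chain to be submitted
 *
 * Assigns the next cookie, links the chain into the hardware list via the
 * previous tail's next pointer, splices it onto used_desc, and issues an
 * append once the pending count reaches ioat_pending_level.
 */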
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

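/**
 * ioat1_dma_prep_memcpy - build a descriptor chain for a memory copy
 * @c: DMA channel handle
 * @dma_dest: destination DMA address
 * @dma_src: source DMA address
 * @len: total number of bytes to copy
 * @flags: dmaengine descriptor flags
 *
 * Splits the copy into xfercap-sized segments, one hardware descriptor per
 * segment.  Only the final descriptor requests a completion writeback (and
 * an interrupt when DMA_PREP_INTERRUPT is set).  Returns NULL and gives the
 * partial chain back to free_desc if descriptor allocation fails.
 */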
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

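/* per-channel cleanup tasklet: reap completed descriptors, then restore the
 * CHANCTRL run state
 */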
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

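/**
 * ioat_dma_unmap - unmap the buffers of a completed operation
 * @chan: channel that executed the operation
 * @flags: descriptor flags (DMA_COMPL_SKIP_*_UNMAP are honored)
 * @len: total transaction length
 * @hw: hardware descriptor of the final segment
 */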
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

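/* read the completion writeback area and convert it to the address of the
 * last descriptor the hardware has completed; logs (but does not yet
 * recover from) a halted channel
 */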
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

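/* returns true when the hardware has completed new descriptors since
 * last_completion; refreshes the completion timer and clears the ack flag
 * as a side effect
 */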
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

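/* retire descriptors on used_desc up to the one written back at
 * phys_complete: run callbacks, unmap buffers, and move acked entries to
 * free_desc.  The final completed descriptor stays on the list so new
 * submissions can chain from it.  Called with cleanup_lock and desc_lock held.
 */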
static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		 __func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

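/* channel watchdog: finishes a pending reset by restarting the descriptor
 * chain, or, when completions stall, forces cleanup and escalates to a
 * channel reset if no progress is made after an acknowledged timeout
 */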
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

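/* poll for completion of a cookie; runs cleanup before re-checking so that
 * descriptors retired since the last interrupt are accounted for
 */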
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

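/**
 * ioat1_dma_start_null_desc - seed the channel with a self-acked NULL descriptor
 * @ioat: channel to be started
 *
 * Installs a NULL descriptor as the head of the chain, points the chain
 * address register at it, and starts the channel so that real descriptors
 * can later be appended.
 */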
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx)");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

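/* common probe path: create the descriptor and completion pools, enumerate
 * channels, set up interrupts, and run the device self-test
 */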
int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

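/* register with the dmaengine core, unwinding interrupts and pools on failure */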
int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

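/* per-channel sysfs attributes: ring size, active count, capabilities, version */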
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask)  ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");

}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

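/* hook up the ioat1-specific operations, probe and register the dma device,
 * add the sysfs attributes, and optionally initialize DCA support
 */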
int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

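/* teardown: quiesce interrupts, remove sysfs objects, unregister from the
 * dmaengine core, and destroy the descriptor pools
 */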
void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}