/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_dma_chan *ioat_lookup_chan_by_index(struct ioatdma_device *device,
						       int index)
{
	return device->idx[index];
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
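	/* Each set bit in ATTNSTATUS identifies a channel that raised
	 * the interrupt; defer its descriptor cleanup to that
	 * channel's tasklet.
	 */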
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
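	/* XFERCAP encodes the largest transfer a single descriptor can
	 * carry as a power of two; a scale of 0 is treated as no
	 * practical limit (all ones in a u32).
	 */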
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}

static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}

}

static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
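	/* Batch the MMIO doorbell: only issue an APPEND once at least
	 * four descriptors are pending; ioat_dma_memcpy_issue_pending()
	 * flushes whatever is left.
	 */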
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
			ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"ioatdma: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
}

/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new = NULL;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		/* will this ever happen? */
		/* TODO add upper limit on these */
		BUG_ON(!new);
	}

	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
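	/* Carve the copy into hardware descriptors of at most xfercap
	 * bytes each; only the final descriptor gets the completion
	 * write-back control bit and the caller-visible cookie.
	 */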
	while (len) {
		new = ioat_dma_get_next_descriptor(ioat_chan);
		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len  -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
	ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock(&ioat_chan->cleanup_lock);
		return;
	}

	cookie = 0;
	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
					pci_unmap_addr(desc, dst),
					pci_unmap_len(desc, len),
					PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
					pci_unmap_addr(desc, src),
					pci_unmap_len(desc, len),
					PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock(&ioat_chan->cleanup_lock);
}

static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
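	/* A NULL (no-op) descriptor gives the channel a valid chain
	 * head to start from; real transfers are appended behind it.
	 */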
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx)");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

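	/* Try the interrupt styles from most to least capable:
	 * per-channel MSI-X, single-vector MSI-X, MSI, then legacy
	 * INTx, falling back whenever setup of a style fails.
	 */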
	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}

struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	dev_err(&device->pdev->dev,
		"ioatdma: Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x\n",
		device->common.chancnt, device->version);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(iobase);
	/* "device" may be NULL or already freed here; report via pdev */
	dev_err(&pdev->dev,
		"ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	dma_async_device_unregister(&device->common);

	ioat_dma_remove_interrupts(device);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}