/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);

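/**
 * ioat_dma_enumerate_channels - find and set up the device's DMA channels
 * @device: the device whose channels are enumerated
 *
 * Reads the channel count and transfer capability from the MMIO registers,
 * allocates an ioat_dma_chan for each hardware channel and adds it to the
 * common dmaengine channel list.  Returns the number of channels found.
 */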
static int ioat_dma_enumerate_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}

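/**
 * ioat_set_src - set the DMA source address on each descriptor in a chain
 * @addr: mapped source address
 * @tx: descriptor whose tx_list is walked
 * @index: unused by this driver
 *
 * Each hardware descriptor covers at most xfercap bytes, so the address
 * is advanced by xfercap for every descriptor in the chain.
 */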
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}

}

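/**
 * ioat_set_dest - set the DMA destination address on each descriptor in a chain
 * @addr: mapped destination address
 * @tx: descriptor whose tx_list is walked
 * @index: unused by this driver
 */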
static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

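/**
 * ioat_tx_submit - hand a prepared descriptor chain to the hardware
 * @tx: descriptor returned by ioat_dma_prep_memcpy
 *
 * Assigns a cookie, links the chain into the NextDescriptor field of the
 * last descriptor on the used list, splices it onto that list and writes
 * the APPEND command once enough descriptors are pending.
 */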
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
			ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

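/**
 * ioat_dma_alloc_descriptor - allocate one hardware descriptor plus its
 *                             software wrapper
 * @ioat_chan: the channel the descriptor will be used on
 * @flags: allocation flags (GFP_KERNEL or GFP_ATOMIC)
 */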
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"ioatdma: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_dma_start_null_desc(ioat_chan);
	return i;
}

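/**
 * ioat_dma_free_chan_resources - reset the channel and free its descriptors
 * @chan: the channel to be cleaned up
 *
 * Runs a final cleanup pass, resets the hardware, returns all descriptors
 * to their pools and releases the completion writeback area.
 */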
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	u16 chanctrl;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
}

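/**
 * ioat_dma_prep_memcpy - build a descriptor chain for a memcpy operation
 * @chan: DMA channel handle
 * @len: total transfer length in bytes
 * @int_en: unused by this driver
 *
 * Pulls descriptors off the free list (allocating more if needed) until
 * the whole length is covered, xfercap bytes per descriptor; only the
 * last descriptor carries the completion-status control bit and the
 * client-visible cookie.
 */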
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
			BUG_ON(!new);
		}

		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len  -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

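/**
 * ioat_dma_memcpy_cleanup - reclaim descriptors the hardware has finished with
 * @ioat_chan: channel to clean up
 *
 * Reads the completion writeback area to find the last descriptor the
 * hardware processed, unmaps the buffers of finished requests, moves
 * acked descriptors back to the free list and records the last
 * completed cookie.
 */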
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
	ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock(&ioat_chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
					pci_unmap_addr(desc, dst),
					pci_unmap_len(desc, len),
					PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
					pci_unmap_addr(desc, src),
					pci_unmap_len(desc, len),
					PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock(&ioat_chan->cleanup_lock);
}

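/**
 * ioat_dma_dependency_added - callback for the dmaengine dependency hook
 * @chan: DMA channel handle
 *
 * Kicks the cleanup path when no descriptors are currently pending.
 */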
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB)  },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name 	= "ioatdma",
	.id_table = ioat_pci_tbl,
	.probe	= ioat_probe,
	.shutdown = ioat_shutdown,
	.remove	= __devexit_p(ioat_remove),
};

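/**
 * ioat_do_interrupt - shared interrupt handler
 * @irq: interrupt number
 * @data: the ioat_device that registered this handler
 *
 * Checks the master interrupt enable and status bits, logs the attention
 * status and acknowledges the interrupt by writing the interrupt control
 * value back.
 */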
static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

	printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

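/**
 * ioat_dma_start_null_desc - program a NULL descriptor and start the channel
 * @ioat_chan: the channel to be started
 *
 * Gives the hardware a valid chain address to start from before any real
 * work is submitted.
 */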
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
		BUG_ON(!desc);
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static int ioat_self_test(struct ioat_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

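/**
 * ioat_probe - set up a newly discovered I/OAT device
 * @pdev: the PCI device
 * @ent: matching entry in ioat_pci_tbl
 *
 * Maps the MMIO registers, creates the descriptor and completion pools,
 * hooks up the interrupt, enumerates the channels, runs a self-test and
 * registers the device with the dmaengine core.
 */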
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
		sizeof(struct ioat_dma_descriptor), 64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0) {
		device->msi = 1;
	} else {
		device->msi = 0;
	}
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
		device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;

	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
	       device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	printk(KERN_INFO
		 "ioatdma: Intel(R) I/OAT DMA Engine found, %d channels\n",
		 device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:

	printk(KERN_INFO
		"ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");

	return err;
}

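/**
 * ioat_shutdown - unregister the DMA device on system shutdown
 * @pdev: the PCI device being shut down
 */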
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;
	device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
}

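/**
 * ioat_remove - undo everything ioat_probe set up
 * @pdev: the PCI device being removed
 *
 * Unregisters from the dmaengine core, releases the interrupt, pools,
 * MMIO mapping and PCI resources, then frees the per-channel structures.
 */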
static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}

module_exit(ioat_exit_module);