/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");

static unsigned ipath_hol_timeout_ms = 13000;
module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
	"duration of user app suspension after link failure");

unsigned ipath_linkrecovery = 1;
module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

/*
 * Table to translate the LINKTRAININGSTATE portion of
 * IBCStatus to a human-readable form.
 */
const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",		/* unused */
	"LState7",		/* unused */
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"CfgTxRevLane",		/* unused before IBA7220 */
	"RecovWaitRmt",
	"RecovIdle",
	/* below were added for IBA7220 */
	"CfgEnhanced",
	"CfgTest",
	"CfgWaitRmtTest",
	"CfgWaitCfgEnhanced",
	"SendTS_T",
	"SendTstIdles",
	"RcvTS_T",
	"SendTst_TS1s",
	"LTState18", "LTState19", "LTState1A", "LTState1B",
	"LTState1C", "LTState1D", "LTState1E", "LTState1F"
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220

/* Number of seconds before our card status check...  */
#define STATUS_TIMEOUT 60

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
	.driver = {
		.groups = ipath_driver_attr_groups,
	},
};

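/*
 * Helper to read BAR0/BAR1 from PCI config space before the device is
 * enabled, so ipath_init_one can detect (and log) a BAR that was wiped
 * by a chip reset.
 */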
static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}

static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vmalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	memset(dd, 0, sizeof(*dd));
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}

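/*
 * Count units present and up; any of the output pointers may be NULL.
 * Also reports the largest number of configured ports on any unit.
 */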
int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	int maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}

/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).   On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void ipath_verify_pioperf(struct ipath_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
	if (!piobuf) {
		dev_info(&dd->pcidev->dev,
			"No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		dev_info(&dd->pcidev->dev,
			"Couldn't get memory for checking PIO perf,"
			" skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	ipath_disable_armlaunch(dd);

	/*
	 * length 0, no dwords actually sent, and mark as VL15
	 * on chips where that may matter (due to IB flowcontrol)
	 */
	if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
		writeq(1UL << 63, piobuf);
	else
		writeq(0, piobuf);
	ipath_flush_wc();

	/*
	 * this is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.   Running for
	 * >= 5 msec seems to get us "close enough" to accurate values
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		__iowrite32_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}
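	/*
	 * Each loop iteration copies cnt (1024) bytes, so lcnt iterations
	 * in emsecs milliseconds is roughly lcnt/emsecs MiB/sec; comparing
	 * lcnt against emsecs * 1024 is therefore a check for about
	 * 1 GiB/sec.
	 */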

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		ipath_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is "
			"only %u MiB/sec\n",
			lcnt / (u32) emsecs);
	else
		ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	ipath_disarm_piobufs(dd, pbnum, 1);
	ipath_enable_armlaunch(dd);
}

static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;
	u8 rev;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	ret = pci_enable_device(pdev);
	if (ret) {
		/* This can happen iff:
		 *
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (ret) {
		/*
		 * if the 64 bit setup fails, try 32 bit.  Some systems
		 * do not setup 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (ret) {
			dev_info(&pdev->dev,
				"Unable to set DMA mask for unit %u: %d\n",
				dd->ipath_unit, ret);
			goto bail_regions;
		}
		else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (ret)
				dev_info(&pdev->dev,
					"Unable to set DMA consistent mask "
					"for unit %u: %d\n",
					dd->ipath_unit, ret);

		}
	}
	else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (ret)
			dev_info(&pdev->dev,
				"Unable to set DMA consistent mask "
				"for unit %u: %d\n",
				dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;	/* save for later use */
	dd->ipath_vendorid = ent->vendor;

	/* setup the chip-specific functions, as early as possible. */
	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
		ipath_init_iba6110_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
		return -ENODEV;
#endif
	case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
		ipath_init_iba6120_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
		return -ENODEV;
#endif
	case PCI_DEVICE_ID_INFINIPATH_7220:
#ifndef CONFIG_PCI_MSI
		ipath_dbg("CONFIG_PCI_MSI is not enabled, "
			  "using INTx for unit %u\n", dd->ipath_unit);
#endif
		ipath_init_iba7220_funcs(dd);
		break;
	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		return -ENODEV;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
			   j, (unsigned long long)pdev->resource[j].start,
			   (unsigned long long)pdev->resource[j].end,
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	if (ret) {
		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
			      "%u: err %d\n", dd->ipath_unit, -ret);
		goto bail_regions;	/* shouldn't ever happen */
	}
	dd->ipath_pcirev = rev;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->ipath_kregbase = __ioremap(addr, len,
		(_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;	/* used for io_remap, etc. */
	/* for user mmap */
	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * set up our interrupt handler; IRQF_SHARED probably not needed,
	 * since MSI interrupts shouldn't be shared but won't hurt for now.
	 * Check for a 0 irq after we return from chip-specific bus setup,
	 * since that setup can affect the irq value.
	 */
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);

	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_verify_pioperf(dd);

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	goto bail;

bail_irqsetup:
	if (pdev->irq)
		free_irq(pdev->irq, dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}

static void __devexit cleanup_device(struct ipath_devdata *dd)
{
	int port;
	struct ipath_portdata **tmp;
	unsigned long flags;

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * if we haven't already cleaned up before, these are
			 * cleared to ensure any register reads/writes "fail" until
			 * re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_spectriggerhit)
		dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
			 dd->ipath_spectriggerhit);

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
			dd->ipath_pd[0]->port_rcvhdrq_size,
			dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);

		dd->ipath_egrtidbase = NULL;
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do for portcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that ipath_pd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	tmp = dd->ipath_pd;
	dd->ipath_pd = NULL;
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = tmp[port];
		tmp[port] = NULL; /* debugging paranoia */
		ipath_free_pddata(dd, pd);
	}
	kfree(tmp);
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

	/*
	 * disable the IB link early, to be sure no new packets arrive, which
	 * complicates the shutdown process
	 */
	ipath_shutdown_device(dd);

	flush_scheduled_work();

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	/*
	 * turn off rcv, send, and interrupts for all ports, all drivers
	 * should also hard reset the chip here?
	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
	 * for all versions of the driver, if they were allocated
	 */
	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);
	/*
	 * we check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.	Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->ipath_f_cleanup)
		/* clean up chip-specific stuff */
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);

/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * cancel a range of PIO buffers, used when they might be armed, but
 * not triggered.  Used at init to ensure buffer state, and also user
 * process close, in case it died while writing to a PIO buffer
 * Also after errors.
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	unsigned long flags;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	for (i = first; i < last; i++) {
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		/*
		 * The disarm-related bits are write-only, so it
		 * is ok to OR them in with our copy of sendctrl
		 * while we hold the lock.
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
		/* can't disarm bufs back-to-back per iba7220 spec */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
	/* on some older chips, update may not happen after cancel */
	ipath_force_pio_avail_update(dd);
}

/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * wait up to msecs milliseconds for IB link state change to occur for
 * now, take the easy polling route.  Currently used only by
 * ipath_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT.  The state argument can have multiple flags set, for any of several
 * transitions.
 */
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
	char *buf, size_t blen)
{
	static const struct {
		ipath_err_t err;
		const char *msg;
	} errs[] = {
		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
	};
	int i;
	int expected;
	size_t bidx = 0;

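	/*
	 * SDmaDisabled is expected (and therefore not reported) while an
	 * SDMA abort is in progress; every other error is always reported.
	 */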
	for (i = 0; i < ARRAY_SIZE(errs); i++) {
		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
		if ((err & errs[i].err) && !expected)
			bidx += snprintf(buf + bidx, blen - bidx,
					 "%s ", errs[i].msg);
	}
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.   Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
	ipath_err_t err)
{
	int iserr = 1;
	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0; // if only packet errors.
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				// clear for check below, so only once
				err &= INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
			!(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
	if (err & INFINIPATH_E_SDMAERRS)
		decode_sdma_errs(dd, err, buf, blen);
	if (err & INFINIPATH_E_INVALIDEEPCMD)
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}

/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* if no errors, and so don't need to check what's first */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}

/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}

/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff SFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * Only fully supported way to handle this is to allocate lots
	 * extra, align as needed, and then do skb_reserve().  That wastes
	 * a lot of memory...  I'll have to hack this into infinipath_copy
	 * also.
	 */

	/*
	 * We need 2 extra bytes for ipath_ether data sent in the
	 * key header.  In order to keep everything dword aligned,
	 * we'll reserve 4 bytes.
	 */
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* We need a 2KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
		 */
		len += 2047;
	}

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}

bail:
	return skb;
}

static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     __le32 *rhf_addr,
			     struct ipath_message_header *hdr)
{
	char emsg[128];

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type(rhf_addr),
		   ipath_hdrget_length_in_bytes(rhf_addr),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	/* Count local link integrity errors. */
	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}

/*
 * ipath_kreceive - receive a packet
 * @pd: the infinipath port
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;	/* stats, may eventually remove */
	int last;

	l = pd->port_head;
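	/*
	 * Chips flagged IPATH_NODMA_RTAIL have no DMA'd tail register; new
	 * packets are detected via the sequence number in the RHF rather
	 * than by comparing the head index against a tail value.
	 */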
	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
		u32 seq = ipath_hdrget_seq(rhf_addr);

		if (seq != pd->port_seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = ipath_get_rcvhdrtail(pd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();
	}

reloop:
	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
		eflags = ipath_hdrget_err_flags(rhf_addr);
		etype = ipath_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
		    ipath_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			/*
			 * It turns out that the chip uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not.  So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ipath_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */

		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
		    etype != RCVHQ_RCV_TYPE_ERROR &&
		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION)
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, opcode, qp, tlen);
		}
		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) >> 24);
		else {
			/*
			 * error packet, type of error unknown.
			 * Probably type 3, but we don't know, so don't
			 * even try to print the opcode, etc.
			 * Usually caused by a "bad packet", that has no
			 * BTH, when the LRH says it should.
			 */
			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
				  " %x, len %x hdrq+%x rhf: %Lx\n",
				  etail, tlen, l, (unsigned long long)
				  le64_to_cpu(*(__le64 *) rhf_addr));
			if (ipath_debug & __IPATH_ERRPKTDBG) {
				u32 j, *d, dw = rsize-2;
				if (rsize > (tlen>>2))
					dw = tlen>>2;
				d = (u32 *)hdr;
				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
					dw);
				for (j = 0; j < dw; j++)
					printk(KERN_DEBUG "%8x%s", d[j],
						(j%8) == 7 ? "\n" : " ");
				printk(KERN_DEBUG ".\n");
			}
		}
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		rhf_addr = (__le32 *) pd->port_rcvhdrq +
			l + dd->ipath_rhf_offset;
		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
			u32 seq = ipath_hdrget_seq(rhf_addr);

			if (++pd->port_seq_cnt > 13)
				pd->port_seq_cnt = 1;
			if (seq != pd->port_seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
B
		/*
1295 1296 1297
		 * update head regs on last packet, and every 16 packets.
		 * Reduce bus traffic, while still trying to prevent
		 * rcvhdrq overflows, for when the queue is nearly full
B
		 */
1299 1300 1301 1302 1303 1304 1305 1306
		if (last || !(i & 0xf)) {
			u64 lval = l;

			/* request IBA6120 and 7220 interrupt only on last */
			if (last)
				lval |= dd->ipath_rhdrhead_intr_off;
			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
				pd->port_port);
1307
			if (updegr) {
1308
				ipath_write_ureg(dd, ur_rcvegrindexhead,
1309
						 etail, pd->port_port);
1310 1311 1312
				updegr = 0;
			}
		}
B
	}

1315 1316
	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1317
		/* IBA6110 workaround; we can have a race clearing chip
1318 1319 1320 1321 1322 1323 1324
		 * interrupt with another interrupt about to be delivered,
		 * and can clear it before it is delivered on the GPIO
		 * workaround.  By doing the extra check here for the
		 * in-memory tail register updating while we were doing
		 * earlier packets, we "almost" guarantee we have covered
		 * that case.
		 */
1325
		u32 hqtail = ipath_get_rcvhdrtail(pd);
1326 1327 1328 1329 1330 1331 1332
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1; /* loop 1 extra time at most */
			goto reloop;
		}
	}

B
	pkttot += i;

1335
	pd->port_head = l;
B

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}

/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
 * @dd: the infinipath device
 *
 * called whenever our local copy indicates we have run out of send buffers
 * NOTE: This can be called from interrupt context by some code
 * and from non-interrupt context by ipath_getpiobuf().
 */

static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	int i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	/* If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpu's can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* only if packet debug and verbose */
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;
		/*
		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
		 */
		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
		else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = dd->ipath_pioavailkernel[i] &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

/*
 * used to force update of pioavailshadow if we can't get a pio buffer.
 * Needed primarily due to exiting freeze mode after recovering
 * from errors.  Done lazily, because it's safer (known to not
 * be writing pio buffers).
 */
static void ipath_reset_availshadow(struct ipath_devdata *dd)
{
	int i, im;
	unsigned long flags;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < dd->ipath_pioavregs; i++) {
		u64 val, oldval;
		/* deal with 6110 chip bug on high register #s */
		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
			i ^ 1 : i;
		val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
		/*
		 * busy out the buffers not in the kernel avail list,
		 * without changing the generation bits.
		 */
		oldval = dd->ipath_pioavailshadow[i];
		dd->ipath_pioavailshadow[i] = val |
			((~dd->ipath_pioavailkernel[i] <<
			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
		if (oldval != dd->ipath_pioavailshadow[i])
			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
				i, (unsigned long long) oldval,
				dd->ipath_pioavailshadow[i]);
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * called from user init code, and also layered driver init
 */
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}
	return ret;
}

/*
 * debugging code and stats updates if no pio buffers available.
 */
static noinline void no_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long *shadow = dd->ipath_pioavailshadow;
	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;

	dd->ipath_upd_pio_shadow = 1;

	/*
	 * not atomic, but if we lose a stat count in a while, that's OK
	 */
	ipath_stats.sps_nopiobufs++;
	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
		ipath_force_pio_avail_update(dd); /* at start */
		ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
			"%llx %llx %llx %llx\n"
			"ipath  shadow:  %lx %lx %lx %lx\n",
			dd->ipath_consec_nopiobuf,
			(unsigned long)get_cycles(),
			(unsigned long long) le64_to_cpu(dma[0]),
			(unsigned long long) le64_to_cpu(dma[1]),
			(unsigned long long) le64_to_cpu(dma[2]),
			(unsigned long long) le64_to_cpu(dma[3]),
			shadow[0], shadow[1], shadow[2], shadow[3]);
		/*
		 * 4 buffers per byte, 4 registers above, cover rest
		 * below
		 */
		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
		    (sizeof(shadow[0]) * 4 * 4))
			ipath_dbg("2nd group: dmacopy: "
				  "%llx %llx %llx %llx\n"
				  "ipath  shadow:  %lx %lx %lx %lx\n",
				  (unsigned long long)le64_to_cpu(dma[4]),
				  (unsigned long long)le64_to_cpu(dma[5]),
				  (unsigned long long)le64_to_cpu(dma[6]),
				  (unsigned long long)le64_to_cpu(dma[7]),
				  shadow[4], shadow[5], shadow[6], shadow[7]);

		/* at end, so update likely happened */
		ipath_reset_availshadow(dd);
	}
}

/*
 * common code for normal driver pio buffer allocation, and reserved
 * allocation.
 *
 * do appropriate marking as busy, etc.
 * returns buffer number if one found (>=0), negative number is error.
 */
static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
	u32 *pbufnum, u32 first, u32 last, u32 firsti)
{
	int i, j, updated = 0;
	unsigned piobcnt;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = last - first;
	if (dd->ipath_upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid
		 */
		ipath_update_pio_bufs(dd);
		updated++;
		i = first;
	} else
		i = firsti;
rescan:
	/*
	 * while test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
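	/*
	 * Two shadow bits per buffer: bit 2*i+1 is the busy bit claimed
	 * here, and bit 2*i is the generation bit flipped when the buffer
	 * is handed out.
	 */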
	for (j = 0; j < piobcnt; j++, i++) {
		if (i >= last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == piobcnt) {
		if (!updated) {
			/*
			 * first time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		} else if (updated == 1 && piobcnt <=
			((dd->ipath_sendctrl
			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
			INFINIPATH_S_UPDTHRESH_MASK)) {
			/*
			 * for chips supporting and using the update
			 * threshold, we need to force an update of the
			 * in-memory copy if the count is less than the
			 * threshold, then check one more time.
			 */
			ipath_force_pio_avail_update(dd);
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}

		no_pio_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->ipath_piobcnt2k)
			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
					       i * dd->ipath_palign);
		else
			buf = (u32 __iomem *)
				(dd->ipath_pio4kbase +
				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
		if (pbufnum)
			*pbufnum = i;
	}

	return buf;
}

/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @plen: the size of the PIO buffer needed in 32-bit words
 * @pbufnum: the buffer number is placed here
 */
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
{
	u32 __iomem *buf;
	u32 pnum, nbufs;
	u32 first, lasti;

	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
		first = dd->ipath_piobcnt2k;
		lasti = dd->ipath_lastpioindexl;
	} else {
		first = 0;
		lasti = dd->ipath_lastpioindex;
	}
	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);

	if (buf) {
		/*
		 * Set next starting place.  It's just an optimization,
		 * it doesn't matter who wins on this, so no locking
		 */
		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
			dd->ipath_lastpioindexl = pnum + 1;
		else
			dd->ipath_lastpioindex = pnum + 1;
		if (dd->ipath_upd_pio_shadow)
			dd->ipath_upd_pio_shadow = 0;
		if (dd->ipath_consec_nopiobuf)
			dd->ipath_consec_nopiobuf = 0;
		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
		if (pbufnum)
			*pbufnum = pnum;

	}
	return buf;
}

/**
 * ipath_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the infinipath device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 */
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
			      unsigned len, int avail)
{
	unsigned long flags;
	unsigned end, cnt = 0, next;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i, im;
			/*
			 * the BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 * BITS_PER_LONG is slightly wrong, since it's
			 * always 64 bits per register in chip...
			 * We only work on 64 bit kernels, so that's OK.
			 */
			/* deal with 6110 chip bug on high register #s */
			i = start / BITS_PER_LONG;
			im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
				i ^ 1 : i;
			__clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
				+ start, dd->ipath_pioavailshadow);
			dma = (unsigned long) le64_to_cpu(
				dd->ipath_pioavailregs_dma[im]);
			if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
				+ start) % BITS_PER_LONG, &dma))
				__set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					+ start, dd->ipath_pioavailshadow);
			else
				__clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					+ start, dd->ipath_pioavailshadow);
			__set_bit(start, dd->ipath_pioavailkernel);
		} else {
			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
				dd->ipath_pioavailshadow);
			__clear_bit(start, dd->ipath_pioavailkernel);
		}
		start += 2;
	}

	if (dd->ipath_pioupd_thresh) {
		end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
		next = find_first_bit(dd->ipath_pioavailkernel, end);
		while (next < end) {
			cnt++;
			next = find_next_bit(dd->ipath_pioavailkernel, end,
					next + 1);
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	/*
	 * When moving buffers from kernel to user, if number assigned to
	 * the user is less than the pio update threshold, and threshold
	 * is supported (cnt was computed > 0), drop the update threshold
	 * so we update at least once per allocated number of buffers.
	 * In any case, if the kernel buffers are less than the threshold,
	 * drop the threshold.  We don't bother increasing it, having once
	 * decreased it, since it would typically just cycle back and forth.
	 * If we don't decrease below buffers in use, we can wait a long
	 * time for an update, until some other context uses PIO buffers.
	 */
	if (!avail && len < cnt)
		cnt = len;
	if (cnt < dd->ipath_pioupd_thresh) {
		dd->ipath_pioupd_thresh = cnt;
		ipath_dbg("Decreased pio update threshold to %u\n",
			dd->ipath_pioupd_thresh);
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
			<< INFINIPATH_S_UPDTHRESH_SHIFT);
		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
			<< INFINIPATH_S_UPDTHRESH_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
}

/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * this must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
			 struct ipath_portdata *pd)
{
	int ret = 0;

	if (!pd->port_rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
				sizeof(u32), PAGE_SIZE);

		pd->port_rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
			gfp_flags);

		if (!pd->port_rcvhdrq) {
			ipath_dev_err(dd, "attempt to allocate %d bytes "
				      "for port %u rcvhdrq failed\n",
				      amt, pd->port_port);
			ret = -ENOMEM;
			goto bail;
		}

		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				GFP_KERNEL);
			if (!pd->port_rcvhdrtail_kvaddr) {
				ipath_dev_err(dd, "attempt to allocate 1 page "
					"for port %u rcvhdrqtailaddr "
					"failed\n", pd->port_port);
				ret = -ENOMEM;
				dma_free_coherent(&dd->pcidev->dev, amt,
					pd->port_rcvhdrq,
					pd->port_rcvhdrq_phys);
				pd->port_rcvhdrq = NULL;
				goto bail;
			}
			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
				   "physical\n", pd->port_port,
				   (unsigned long long) phys_hdrqtail);
		}

		pd->port_rcvhdrq_size = amt;

		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
			   "for port %u rcvhdr Q\n",
			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_phys,
			   (unsigned long) pd->port_rcvhdrq_size,
			   pd->port_port);
	}
	else
		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
			   "hdrtailaddr@%p %llx physical\n",
			   pd->port_port, pd->port_rcvhdrq,
			   (unsigned long long) pd->port_rcvhdrq_phys,
			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
			   pd->port_rcvhdrqtailaddr_phys);

	/* clear for security and sanity on each use */
	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
	if (pd->port_rcvhdrtail_kvaddr)
		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);

	/*
	 * tell chip each time we init it, even if we are re-using previous
	 * memory (we zero the register at process close)
	 */
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      pd->port_port, pd->port_rcvhdrq_phys);

bail:
	return ret;
}


/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.   Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo.  The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if normal send had happened.
 */
void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
{
	unsigned long flags;

	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
		goto bail;
	}
	/*
	 * If we have SDMA, and it's not disabled, we have to kick off the
	 * abort state machine, provided we aren't already aborting.
	 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
	 * we skip the rest of this routine. It is already "in progress"
	 */
	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
		int skip_cancel;
		unsigned long *statp = &dd->ipath_sdma_status;

		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		skip_cancel =
			test_and_set_bit(IPATH_SDMA_ABORTING, statp)
			&& !test_bit(IPATH_SDMA_DISABLED, statp);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
		if (skip_cancel)
			goto bail;
	}

	ipath_dbg("Cancelling all in-progress send buffers\n");

	/* skip armlaunch errs for a while */
	dd->ipath_lastcancel = jiffies + HZ / 2;

	/*
	 * The abort bit is auto-clearing.  We also don't want pioavail
	 * update happening during this, and we don't want any other
	 * sends going out, so turn those off for the duration.  We read
	 * the scratch register to be sure that cancels and the abort
	 * have taken effect in the chip.  Otherwise two parts are same
	 * as ipath_force_pio_avail_update()
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
		| INFINIPATH_S_PIOENABLE);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl | INFINIPATH_S_ABORT);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/* disarm all send buffers */
	ipath_disarm_piobufs(dd, 0,
		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);

	if (restore_sendctrl) {
		/* else done by caller later if needed */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
			INFINIPATH_S_PIOENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		/* and again, be sure all have hit the chip */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}

	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		/* only wait so long for intr */
		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
		dd->ipath_sdma_reset_wait = 200;
		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	}
bail:;
}

/*
 * Force an update of in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.  We read the scratch register
 * to make it highly likely that the update will have happened by the
 * time we return.  If already off (as in cancel_sends above), this
 * routine is a nop, on the assumption that the caller will "do the
 * right thing".
 */
void ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	}
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}

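/*
 * Request an IB link state change by writing a link command (and,
 * optionally, a link-init command) into the IBC control register.
 * Also tracks IPATH_IB_LINK_DISABLED so the link-recovery code knows
 * whether it should try to bring the link back up.
 */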
static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
				int linitcmd)
{
	u64 mod_wd;
	static const char *what[4] = {
		[0] = "NOP",
		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
	};

	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
		preempt_enable();
	} else if (linitcmd) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
		preempt_enable();
	}

	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
	ipath_cdbg(VERBOSE,
		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
		dd->ipath_unit, what[linkcmd], linitcmd,
		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl | mod_wd);
	/* read from chip so write is flushed */
	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
}

int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN_ONLY:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);

		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
		lstate = IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINK_LOOPBACK:
		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);

		/* turn heartbeat off, as it causes loopback to fail */
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_OFF);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINK_EXTERNAL:
		dev_info(&dd->pcidev->dev,
			"Disabling IB local loopback (normal)\n");
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_ON);
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* don't wait */
		ret = 0;
		goto bail;

	/*
	 * Heartbeat can be explicitly enabled by the user via
	 * "hrtbt_enable" "file", and if disabled, trying to enable here
	 * will have no effect.  Implicit changes (heartbeat off when
	 * loopback on, and vice versa) are included to ease testing.
	 */
	case IPATH_IB_LINK_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
			IPATH_IB_HRTBT_ON);
		goto bail;

	case IPATH_IB_LINK_NO_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
			IPATH_IB_HRTBT_OFF);
		goto bail;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

/**
 * ipath_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.   For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    (arg != 4096 || !ipath_mtu4096)) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;        /* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
				piosize = dd->ipath_init_ibmaxlen;
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		u64 ibc = dd->ipath_ibcctrl, ibdw;
		/*
		 * update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 */
		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 dd->ibcc_mpl_shift);
		ibc |= ibdw << dd->ibcc_mpl_shift;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

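/*
 * Record the assigned LID and LMC, and pass them to the chip-specific
 * code via IPATH_IB_CFG_LIDLMC.
 */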
int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
{
	dd->ipath_lid = lid;
	dd->ipath_lmc = lmc;

	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
		(~((1U << lmc) - 1)) << 16);

	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);

	return 0;
}


/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Registers that vary with the chip implementation constants (port)
 * use this routine.
 */
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			  unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}

/*
 * The following deal with the "obviously simple" task of overriding the state
 * of the LEDs, which normally indicate link physical and logical status.
 * The complications arise in dealing with different hardware mappings, the
 * board-dependent routine being called from interrupts, and the requirement
 * to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

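/*
 * Timer handler for LED override: alternate between the two override
 * phases, then let the board-specific code reprogram the LEDs based on
 * the current link and link-training state.
 */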
static void ipath_run_led_override(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
	int timeoff;
	int pidx;
	u64 lstate, ltstate, val;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	pidx = dd->ipath_led_override_phase++ & 1;
	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
	timeoff = dd->ipath_led_override_timeoff;

	/*
	 * below potentially restores the LED values per current status,
	 * should also possibly setup the traffic-blink register,
	 * but leave that to per-chip functions.
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	ltstate = ipath_ib_linktrstate(dd, val);
	lstate = ipath_ib_linkstate(dd, val);

	dd->ipath_f_setextled(dd, lstate, ltstate);
	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
}

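/*
 * Accept an LED override request: the low two nybbles give the two
 * blink phases, bits 8-15 give the blink frequency (0 means steady,
 * polled at 1HZ).  Start the override timer if it isn't already running.
 */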
void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
{
	int timeoff, freq;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = val & 0xF;
	}
	dd->ipath_led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&dd->ipath_led_override_timer);
		dd->ipath_led_override_timer.function =
						 ipath_run_led_override;
		dd->ipath_led_override_timer.data = (unsigned long) dd;
		dd->ipath_led_override_timer.expires = jiffies + 1;
		add_timer(&dd->ipath_led_override_timer);
	} else
		atomic_dec(&dd->ipath_led_override_timer_active);
}

/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by ipath_init_chip(dd,1)
 */
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	unsigned long flags;

	ipath_dbg("Shutting down the device\n");

	ipath_hol_up(dd); /* make sure user processes aren't suspended */

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	/* mask interrupts, but not errors */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		teardown_sdma(dd);

	/*
	 * gracefully stop all sends allowing any in progress to trickle out
	 * first.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	/* flush it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/*
	 * enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(5);

	dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */

	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
	ipath_cancel_sends(dd, 0);

	/*
	 * we are shutting down, so tell components that care.  We don't do
	 * this on just a link state change, much like ethernet, a cable
	 * unplug, etc. doesn't change driver state
	 */
	signal_ib_event(dd, IB_EVENT_PORT_ERR);

	/* disable IBC */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	/*
	 * clear SerdesEnable and turn the LEDs off; do this here because
	 * we are unloading, so don't count on interrupts to move along.
	 * Turn the LEDs off explicitly for the same reason.
	 */
	dd->ipath_f_quiet_serdes(dd);

	/* stop all the timers that might still be running */
	del_timer_sync(&dd->ipath_hol_timer);
	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}
	if (dd->ipath_intrchk_timer.data) {
		del_timer_sync(&dd->ipath_intrchk_timer);
		dd->ipath_intrchk_timer.data = 0;
	}
	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/*
	 * clear all interrupts and errors, so that the next time the driver
	 * is loaded or device is enabled, we know that whatever is set
	 * happened while we were unloaded
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

	ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
	ipath_update_eeprom_log(dd);
}

/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * free up any allocated data for a port
 * This should not touch anything that would affect a simultaneous
 * re-allocation of port data, because it is called after ipath_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 * (The only exception to global state is freeing the port0 port0_skbs.)
 */
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	if (!pd)
		return;

	if (pd->port_rcvhdrq) {
		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_size);
		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
		pd->port_rcvhdrq = NULL;
		if (pd->port_rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					 pd->port_rcvhdrtail_kvaddr,
					 pd->port_rcvhdrqtailaddr_phys);
			pd->port_rcvhdrtail_kvaddr = NULL;
		}
	}
	if (pd->port_port && pd->port_rcvegrbuf) {
		unsigned e;

		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
			void *base = pd->port_rcvegrbuf[e];
			size_t size = pd->port_rcvegrbuf_size;

			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
				   "chunk %u/%u\n", base,
				   (unsigned long) size,
				   e, pd->port_rcvegrbuf_chunks);
			dma_free_coherent(&dd->pcidev->dev, size,
				base, pd->port_rcvegrbuf_phys[e]);
		}
		kfree(pd->port_rcvegrbuf);
		pd->port_rcvegrbuf = NULL;
		kfree(pd->port_rcvegrbuf_phys);
		pd->port_rcvegrbuf_phys = NULL;
		pd->port_rcvegrbuf_chunks = 0;
	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
		unsigned e;
		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;

		dd->ipath_port0_skbinfo = NULL;
		ipath_cdbg(VERBOSE, "free closed port %d "
			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
			   skbinfo);
		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
			if (skbinfo[e].skb) {
				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
						 dd->ipath_ibmaxlen,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(skbinfo[e].skb);
			}
		vfree(skbinfo);
	}
	kfree(pd->port_tid_pg_list);
	vfree(pd->subport_uregbase);
	vfree(pd->subport_rcvegrbuf);
	vfree(pd->subport_rcvhdr_base);
	kfree(pd);
}

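/*
 * Module entry point: set up the unit table, register the PCI driver,
 * and create the ipathfs filesystem.
 */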
static int __init infinipath_init(void)
{
	int ret;

	if (ipath_debug & __IPATH_DBG)
		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_pci;
	}

	goto bail;

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}

static void __exit infinipath_cleanup(void)
{
	ipath_exit_ipathfs();

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}

/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user ports are open that use chip resources
 */
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);
	unsigned long flags;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		/* Need to stop LED timer, _then_ shut off LEDs */
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/* Shut off LEDs after we are sure timer is not running */
	dd->ipath_led_override = LED_OVER_BOTH_OFF;
	dd->ipath_f_setextled(dd, 0, 0);

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	if (dd->ipath_pd)
		for (i = 1; i < dd->ipath_cfgports; i++) {
			if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
				continue;
			spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
			ipath_dbg("unit %u port %d is in use "
				  "(PID %u cmd %s), can't reset\n",
				  unit, i,
				  pid_nr(dd->ipath_pd[i]->port_pid),
				  dd->ipath_pd[i]->port_comm);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		teardown_sdma(dd);

	dd->ipath_flags &= ~IPATH_INITTED;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
	ret = dd->ipath_f_reset(dd);
	if (ret == 1) {
		ipath_dbg("Reinitializing unit %u after reset attempt\n",
			  unit);
		ret = ipath_init_chip(dd, 1);
	} else
		ret = -EAGAIN;
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}

/*
 * send a signal to all the processes that have the driver open
 * through the normal interfaces (i.e., everything other than diags
 * interface).  Returns number of signalled processes.
 */
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
{
	int i, sub, any = 0;
	struct pid *pid;
	unsigned long flags;

	if (!dd->ipath_pd)
		return 0;

	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	for (i = 1; i < dd->ipath_cfgports; i++) {
		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
			continue;
		pid = dd->ipath_pd[i]->port_pid;
		if (!pid)
			continue;

		dev_info(&dd->pcidev->dev, "context %d in use "
			  "(PID %u), sending signal %d\n",
			  i, pid_nr(pid), sig);
		kill_pid(pid, sig, 1);
		any++;
		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
			pid = dd->ipath_pd[i]->port_subpid[sub];
			if (!pid)
				continue;
			dev_info(&dd->pcidev->dev, "sub-context "
				"%d:%d in use (PID %u), sending "
				"signal %d\n", i, sub, pid_nr(pid), sig);
			kill_pid(pid, sig, 1);
			any++;
		}
	}
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
	return any;
}

static void ipath_hol_signal_down(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGSTOP))
		ipath_dbg("Stopped some processes\n");
	ipath_cancel_sends(dd, 1);
}


static void ipath_hol_signal_up(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGCONT))
		ipath_dbg("Continued some processes\n");
}

/*
 * link is down, stop any user processes, and flush pending sends
 * to prevent HoL blocking, then start the HoL timer that
 * periodically continues, then stop procs, so they can detect
 * link down if they want, and do something about it.
 * Timer may already be running, so use __mod_timer, not add_timer.
 */
void ipath_hol_down(struct ipath_devdata *dd)
{
	dd->ipath_hol_state = IPATH_HOL_DOWN;
	ipath_hol_signal_down(dd);
	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
	dd->ipath_hol_timer.expires = jiffies +
		msecs_to_jiffies(ipath_hol_timeout_ms);
	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
}

/*
 * link is up, continue any user processes, and ensure timer
 * is a nop, if running.  Let timer keep running, if set; it
 * will nop when it sees the link is up
 */
void ipath_hol_up(struct ipath_devdata *dd)
{
	ipath_hol_signal_up(dd);
	dd->ipath_hol_state = IPATH_HOL_UP;
}

/*
 * toggle the running/not running state of user processes
 * to prevent HoL blocking on chip resources, but still allow
 * user processes to do link down special case handling.
 * Should only be called via the timer.
 */
void ipath_hol_event(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
		&& dd->ipath_hol_state != IPATH_HOL_UP) {
		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
		ipath_dbg("Stopping processes\n");
		ipath_hol_signal_down(dd);
	} else { /* may do "extra" if also in ipath_hol_up() */
		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
		ipath_dbg("Continuing processes\n");
		ipath_hol_signal_up(dd);
	}
	if (dd->ipath_hol_state == IPATH_HOL_UP)
		ipath_dbg("link's up, don't resched timer\n");
	else {
		dd->ipath_hol_timer.expires = jiffies +
			msecs_to_jiffies(ipath_hol_timeout_ms);
		__mod_timer(&dd->ipath_hol_timer,
			dd->ipath_hol_timer.expires);
	}
}

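/*
 * Set receive polarity inversion in the XGXS config register.
 * Returns -1 if the requested value won't fit in the field, 0 otherwise.
 */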
int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
	u64 val;

	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
		return -1;
	if (dd->ipath_rx_pol_inv != new_pol_inv) {
		dd->ipath_rx_pol_inv = new_pol_inv;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= ((u64)dd->ipath_rx_pol_inv) <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}
	return 0;
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing on
 * the 7220, which is count-based, rather than trigger-based.  Safe for the
 * driver check, since it's at init.   Not completely safe when used for
 * user-mode checking, since some error checking can be lost, but not
 * particularly risky, and only has problematic side-effects in the face of
 * very buggy user code.  There is no reference counting, but that's also
 * fine, given the intended use.
 */
void ipath_enable_armlaunch(struct ipath_devdata *dd)
{
	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
		INFINIPATH_E_SPIOARMLAUNCH);
	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
		dd->ipath_errormask);
}

void ipath_disable_armlaunch(struct ipath_devdata *dd)
{
	/* so don't re-enable if already set */
	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
		dd->ipath_errormask);
}

module_init(infinipath_init);
module_exit(infinipath_cleanup);