/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

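/* Offset of the 32-bit register slot for a given BAR: each BAR has a 4-byte
 * slot, bar0_off() indexes from BAR0 and bar2_off() from BAR2.
 */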
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx.  Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw.  Both "
		 "sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");

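/* Fallback 64-bit MMIO accessors for platforms without readq/writeq: the
 * access is split into two 32-bit operations (low word first, not atomic).
 */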
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

static inline int pdev_is_atom(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
		return 1;
	}
	return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		return 1;
	}
	return 0;
}

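/* Recompute which doorbell/scratchpad accesses are unsafe for this topology,
 * based on the hardware errata workarounds that can (or cannot) be applied.
 */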
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

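/* Translate a memory window index into the PCI BAR that backs it. */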
static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

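/* Return the doorbell bits serviced by the given interrupt vector. */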
static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

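/* Common interrupt body: report a link event if this vector carries the link
 * doorbell bit, and dispatch doorbell events to the client driver.
 */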
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

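/* Set up device interrupts: try MSI-X first, then fall back to MSI, then to
 * legacy INTx.  All doorbells are masked here until a client unmasks them.
 */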
static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev_pdev(ndev);

	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

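/* Dump device, link, and register state for the debugfs "info" file. */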
static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	off += scnprintf(buf + off, buf_size - off,
			 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	if (pdev_is_xeon(ndev->ntb.pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT45 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT45 -\t\t%#018llx\n", u.v64);

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR45 -\t\t%#018llx\n", u.v64);
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");

		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		if (!pci_read_config_word(ndev->ntb.pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(ndev->ntb.pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				  phys_addr_t *base,
				  resource_size_t *size,
				  resource_size_t *align,
				  resource_size_t *align_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (align)
		*align = pci_resource_len(ndev->ntb.pdev, bar);

	if (align_size)
		*align_size = 1;

	return 0;
}

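/* Program and verify the inbound translation address and limit for a memory
 * window.  The DMA address must be aligned to the backing BAR size.
 */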
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev), "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector >= ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
				int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
				     int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}

/* ATOM */

static u64 atom_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int atom_poll_link(struct intel_ntb_dev *ndev)
{
	u32 ntb_ctl;

	ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);

	if (ntb_ctl == ndev->ntb_ctl)
		return 0;

	ndev->ntb_ctl = ntb_ctl;

	ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);

	return 1;
}

static int atom_link_is_up(struct intel_ntb_dev *ndev)
{
	return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int atom_link_is_err(struct intel_ntb_dev *ndev)
{
	if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
	    & ATOM_LTSSMSTATEJMP_FORCEDETECT)
		return 1;

	if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
	    & ATOM_IBIST_ERR_OFLOW)
		return 1;

	return 0;
}

static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & ATOM_PPD_TOPO_MASK) {
	case ATOM_PPD_TOPO_B2B_USD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
		return NTB_TOPO_B2B_USD;

	case ATOM_PPD_TOPO_B2B_DSD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
		return NTB_TOPO_B2B_DSD;

	case ATOM_PPD_TOPO_PRI_USD:
	case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
	case ATOM_PPD_TOPO_SEC_USD:
	case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
		return NTB_TOPO_NONE;
	}

	dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
	return NTB_TOPO_NONE;
}

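/* Link heartbeat work for Atom, which has no link status interrupt: poll the
 * link, and if it is down with an error, reset the ModPhy lanes to recover.
 */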
static void atom_link_hb(struct work_struct *work)
{
	struct intel_ntb_dev *ndev = hb_ndev(work);
	unsigned long poll_ts;
	void __iomem *mmio;
	u32 status32;

	poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;

	/* Delay polling the link status if an interrupt was received,
	 * unless the cached link status says the link is down.
	 */
	if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
		return;
	}

	if (atom_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
		return;
	}

	/* Link is down with error: recover the link! */

	mmio = ndev->self_mmio;

	/* Driver resets the NTB ModPhy lanes - magic! */
	iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
	iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	msleep(100);

	/* Clear AER Errors, write to clear */
	status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
	status32 &= PCI_ERR_COR_REP_ROLL;
	iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
	status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
	iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);

	/* Clear DeSkew Buffer error, write to clear */
	status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
	status32 |= ATOM_DESKEWSTS_DBERR;
	iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);

	status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
	status32 &= ATOM_IBIST_ERR_OFLOW;
	iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);

	/* Releases the NTB state machine to allow the link to retrain */
	status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
	status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
	iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);

	/* There is a potential race between the 2 NTB devices recovering at the
	 * same time.  If the times are the same, the link will not recover and
	 * the driver will be stuck in this loop forever.  Add a random interval
	 * to the recovery time to prevent this race.
	 */
	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
			      + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
}

static int atom_init_isr(struct intel_ntb_dev *ndev)
{
	int rc;

	rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
			   ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
	if (rc)
		return rc;

	/* ATOM doesn't have link status interrupt, poll on that platform */
	ndev->last_ts = jiffies;
	INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);

	return 0;
}

static void atom_deinit_isr(struct intel_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);
	ndev_deinit_isr(ndev);
}

static int atom_init_ntb(struct intel_ntb_dev *ndev)
{
	ndev->mw_count = ATOM_MW_COUNT;
	ndev->spad_count = ATOM_SPAD_COUNT;
	ndev->db_count = ATOM_DB_COUNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &atom_pri_reg;
		ndev->peer_reg = &atom_b2b_reg;
		ndev->xlat_reg = &atom_sec_xlat;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + ATOM_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

static int atom_init_dev(struct intel_ntb_dev *ndev)
{
	u32 ppd;
	int rc;

	rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = atom_init_ntb(ndev);
	if (rc)
		return rc;

	rc = atom_init_isr(ndev);
	if (rc)
		return rc;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		/* Initiate PCI-E link training */
		rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
					    ppd | ATOM_PPD_INIT_LINK);
		if (rc)
			return rc;
	}

	return 0;
}

static void atom_deinit_dev(struct intel_ntb_dev *ndev)
{
	atom_deinit_isr(ndev);
}

/* XEON */

static u64 xeon_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

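/* Check for a link status change; return 1 if the cached link state changed. */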
static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  XEON_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return 1;

	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & XEON_PPD_TOPO_MASK) {
	case XEON_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case XEON_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case XEON_PPD_TOPO_PRI_USD:
	case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case XEON_PPD_TOPO_SEC_USD:
	case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
		dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
		return 1;
	}
	return 0;
}

static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}

static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}

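/* Configure the secondary BARs and translations for back-to-back topology,
 * optionally claiming all or half of one memory window to reach the peer's
 * NTB configuration registers.
 */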
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx >= ndev->mw_count) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;
	}

	return 0;
}

static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &xeon_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			dev_dbg(ndev_dev(ndev),
				"setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev_pdev(ndev);

	switch (pdev->device) {
	/* There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
	 * which may hang the system.  To workaround this use the second memory
	 * window to access the interrupt and scratch pad registers on the
	 * remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware erratum related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW erratum on bit 14 of the b2bdoorbell register: writes will not
	 * be mirrored to the remote system.  Shrink the number of bits by
	 * one, since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

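	/* the PPD register in PCI config space encodes the topology and,
	 * on parts that support it, the BAR4 split setting
	 */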
	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for transparent BAR to figure out if we are
		 * doing split BAR or not. There is no way for the hw on the
		 * transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}

static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}

static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

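	/* same 64-bit then 32-bit fallback for the coherent DMA mask */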
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
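	/* peer registers are reached through the same BAR 0 mapping by
	 * default; hardware-specific setup may remap peer_mmio later
	 */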
	ndev->peer_mmio = ndev->self_mmio;

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

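	/* no B2B window reserved yet; INT_MAX marks b2b_idx as unused */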
	ndev->b2b_off = 0;
	ndev->b2b_idx = INT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	if (pdev_is_atom(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = atom_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

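	/* sample the initial link state before registering the device */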
	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_atom(pdev))
		atom_deinit_dev(ndev);
	else if (pdev_is_xeon(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_atom(pdev))
		atom_deinit_dev(ndev);
	else if (pdev_is_xeon(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct intel_ntb_reg atom_reg = {
	.poll_link		= atom_poll_link,
	.link_is_up		= atom_link_is_up,
	.db_ioread		= atom_db_ioread,
	.db_iowrite		= atom_db_iowrite,
	.db_size		= sizeof(u64),
	.ntb_ctl		= ATOM_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg atom_pri_reg = {
	.db_bell		= ATOM_PDOORBELL_OFFSET,
	.db_mask		= ATOM_PDBMSK_OFFSET,
	.spad			= ATOM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg atom_b2b_reg = {
	.db_bell		= ATOM_B2B_DOORBELL_OFFSET,
	.spad			= ATOM_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg atom_sec_xlat = {
	/* FIXME : .bar0_base	= ATOM_SBAR0BASE_OFFSET, */
	/* FIXME : .bar2_limit	= ATOM_SBAR2LMT_OFFSET, */
	.bar2_xlat		= ATOM_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg xeon_reg = {
	.poll_link		= xeon_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= xeon_db_ioread,
	.db_iowrite		= xeon_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= XEON_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4, 5},
};

static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell		= XEON_PDOORBELL_OFFSET,
	.db_mask		= XEON_PDBMSK_OFFSET,
	.spad			= XEON_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell		= XEON_SDOORBELL_OFFSET,
	.db_mask		= XEON_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	.spad			= XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell		= XEON_B2B_DOORBELL_OFFSET,
	.spad			= XEON_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars.  The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the size
	 * of the memory window by setting the limit to base + size.
	 */
	.bar2_limit		= XEON_PBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base		= XEON_SBAR0BASE_OFFSET,
	.bar2_limit		= XEON_SBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_SBAR23XLAT_OFFSET,
};

static struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_USD_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_USD_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_USD_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_USD_ADDR32,
};

static struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_DSD_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_DSD_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_DSD_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_DSD_ADDR32,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_range		= intel_ntb_mw_get_range,
	.mw_set_trans		= intel_ntb_mw_set_trans,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_is_unsafe		= intel_ntb_db_is_unsafe,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb_db_read,
	.db_clear		= intel_ntb_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);