/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
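
/*
 * Example (hypothetical values, not a recommendation): these parameters can
 * be tuned when the module is loaded, e.g.:
 *
 *	modprobe ntb_transport transport_mtu=0x8000 copy_bytes=2048
 */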

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(*client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
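
/*
 * A minimal client sketch (illustrative only; the "ntb_example" names and
 * callback bodies are hypothetical -- see ntb_netdev for a complete in-tree
 * user of this interface):
 *
 *	static int ntb_example_probe(struct device *client_dev)
 *	{
 *		... create queues, post receive buffers ...
 *		return 0;
 *	}
 *
 *	static void ntb_example_remove(struct device *client_dev)
 *	{
 *		... free queues ...
 *	}
 *
 *	static struct ntb_transport_client ntb_example_client = {
 *		.driver.name = "ntb_example",
 *		.driver.owner = THIS_MODULE,
 *		.probe = ntb_example_probe,
 *		.remove = ntb_example_remove,
 *	};
 *
 *	ntb_transport_register_client_dev("ntb_example");
 *	ntb_transport_register_client(&ntb_example_client);
 */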

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n",
			       qp->link_is_up ? "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}
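
/*
 * Resulting layout of one queue's slice of a memory window, as seen by the
 * receive side (the peer writes the same layout through its tx_mw mapping):
 *
 *	rx_buff: [ frame 0 | frame 1 | ... | frame N-1 | ntb_rx_info ]
 *	frame:   [ payload ............. | ntb_payload_header ]
 *
 * where N == rx_max_entry and each frame is rx_max_frame bytes, so a
 * header sits at the tail of every frame and the flow-control index at
 * the tail of the slice.
 */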

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	unsigned int xlat_size, buff_size;
	int rc;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)size);
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	struct ntb_transport_mw *mw;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * qp_num / mw_count;

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt_debugfs_dir) {
		char debugfs_name[8];

		snprintf(debugfs_name, 8, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt_debugfs_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int rc, i;

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	nt = kzalloc(sizeof(*nt), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err2;
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err4:
	ntb_clear_ctx(ndev);
err3:
	kfree(nt->qp_vec);
err2:
	kfree(nt->mw_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;
	int rc;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;

		rc = -ENOMEM;
		goto err;
	}

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		rc = -EIO;
		goto err;
	}

	dev_dbg(&qp->ndev->pdev->dev,
		"RX OK index %u ver %u size %d into buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	/* FIXME: if this synchronous update of the rx_index gets ahead of
	 * asynchronous ntb_rx_copy_callback of previous entry, there are three
	 * scenarios:
	 *
	 * 1) The peer might miss this update, but observe the update
	 * from the memcpy completion callback.  In this case, the buffer will
	 * not be freed on the peer to be reused for a different packet.  The
	 * successful rx of a later packet would clear the condition, but the
	 * condition could persist if several rx fail in a row.
	 *
	 * 2) The peer may observe this update before the asynchronous copy of
	 * prior packets is completed.  The peer may overwrite the buffers of
	 * the prior packets before they are copied.
	 *
	 * 3) Both: the peer may observe the update, and then observe the index
	 * decrement by the asynchronous completion callback.  Who knows what
	 * badness that will cause.
	 */
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	return rc;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer
 * @handlers: pointer to various ntb queue (callback) handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dmaengine_get();
	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan) {
		dmaengine_put();
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dmaengine_put();
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
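
/*
 * A usage sketch (the "example_*" handler names are hypothetical; their
 * signatures are those of struct ntb_queue_handlers):
 *
 *	static const struct ntb_queue_handlers example_handlers = {
 *		.rx_handler = example_rx_handler,
 *		.tx_handler = example_tx_handler,
 *		.event_handler = example_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, client_dev, &example_handlers);
 *	if (!qp)
 *		return -EIO;
 */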

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_disable(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	nt->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
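
/*
 * Receive buffers must be posted before data can arrive; clients typically
 * pre-post a batch at queue setup and re-post from the rx_handler.  A sketch
 * (hypothetical names; buffer lifetime is the caller's responsibility):
 *
 *	size = ntb_transport_max_size(qp);
 *	for (i = 0; i < NUM_RX_BUFS; i++) {
 *		buf = kmalloc(size, GFP_KERNEL);
 *		if (!buf || ntb_transport_rx_enqueue(qp, buf, buf, size))
 *			break;
 *	}
 */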

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
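
/*
 * Transmit sketch (hypothetical names): payloads are bounded by
 * ntb_transport_max_size(); expect -EINVAL when the link is down, -ENOMEM
 * when no free entries remain, and -EAGAIN when the remote ring is full:
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EMSGSIZE;
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, len);
 *	if (rc)
 *		goto drop;
 */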

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int val;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;
	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);