/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The 'mask' field that existed in the now-deleted qib_qpn_table has no
 * counterpart in rvt_qpn_table, so define the equivalent value here as
 * qpt_mask rather than adding a mask field to rvt_qpn_table.
 */
u16 qpt_mask;

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
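
/*
 * Worked example for find_next_offset() (illustrative values, assuming
 * qpt_mask == 0x3e and n == 4 kernel receive queues): for off == 0x12,
 * ((0x12 & 0x3e) >> 1) == 9 >= 4, so the offset jumps to
 * (0x12 | 0x3e) + 2 == 0x40, the next candidate whose masked bits map
 * back below n.  With qpt_mask == 0 the search is simply
 * find_next_zero_bit() over the page bitmap.
 */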

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};
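
/*
 * Example (illustrative): AETH credit code 0x7 advertises 12 RWQEs and
 * code 0x1C advertises 16384.  The roughly logarithmic encoding lets a
 * 5-bit field cover credit counts up to 32768; the remaining code 0x1F
 * (QIB_AETH_CREDIT_INVAL) means "no credit information".
 */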

static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN, or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI.  Returns the allocated QPN on success, -EINVAL
 * if the port's SMI/GSI QPN is already in use, or -ENOMEM if no QPN is
 * available.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}
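
	/*
	 * Note: QPNs 0 and 1 are reserved for the SMI and GSI QPs, so the
	 * special case above records each port's SMI/GSI allocation as a
	 * single bit in qpt->flags
	 * (1 << ((type == IB_QPT_GSI) + 2 * (port - 1))), and the bitmap
	 * search below never hands out a QPN smaller than 2.
	 */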

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Return: the number of QP0/QP1 QPs still in use across all ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}
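
/*
 * Example (illustrative): if attr->path_mtu asks for IB_MTU_4096 but the
 * port's ibmtu is only 2048 bytes, qib_get_pmtu_from_attr() returns
 * IB_MTU_2048, i.e. the path MTU is clamped to what the link supports.
 */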

int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here, so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
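
/*
 * Worked example (illustrative): with 100 RWQEs available, the binary
 * search in qib_compute_aeth() settles on credit code 0xD (96), the
 * largest table entry that does not exceed the actual count, so the
 * advertised credit slightly understates the available receive space.
 */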

void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
	del_timer_sync(&qp->s_timer);
}

void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle a credit update in an incoming AETH
 * @qp: the qp whose send queue may be unblocked by the credit update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
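
/*
 * Illustrative example: if an ACK carries AETH credit code 0x6 with MSN
 * 0x100, the new limit becomes (0x100 + 8) & QIB_MSN_MASK == 0x108, so
 * roughly eight more send WQEs can be posted before the sender again
 * stalls with RVT_S_WAIT_SSN_CREDIT set.
 */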

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
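
/*
 * Note: the rcu_dereference() calls in qib_qp_iter_next() assume the
 * caller holds rcu_read_lock() across the iteration, as the debugfs
 * seq_file callbacks that drive this iterator are expected to do.
 */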

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif