/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds
 * (the IBTA timeout values, rounded up to whole milliseconds).
 */
const u32 ib_ipath_rnr_table[32] = {
	656,			/* 0 */
	1,			/* 1 */
	1,			/* 2 */
	1,			/* 3 */
	1,			/* 4 */
	1,			/* 5 */
	1,			/* 6 */
	1,			/* 7 */
	1,			/* 8 */
	1,			/* 9 */
	1,			/* A */
	1,			/* B */
	1,			/* C */
	1,			/* D */
	2,			/* E */
	2,			/* F */
	3,			/* 10 */
	4,			/* 11 */
	6,			/* 12 */
	8,			/* 13 */
	11,			/* 14 */
	16,			/* 15 */
	21,			/* 16 */
	31,			/* 17 */
	41,			/* 18 */
	62,			/* 19 */
	82,			/* 1A */
	123,			/* 1B */
	164,			/* 1C */
	246,			/* 1D */
	328,			/* 1E */
	492			/* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

		/*
		 * Entries on the rnrwait list store timeouts relative
		 * to the preceding entry, so subtract each entry's
		 * remaining time while walking to our insertion point.
		 */
		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
{
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(qp, &qp->r_sg_list[j], &wqe->sg_list[i],
				   IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	qp->r_sge.sge = qp->r_sg_list[0];
	qp->r_sge.sg_list = qp->r_sg_list + 1;
	qp->r_sge.num_sge = j;
	ret = 1;
	goto bail;

bad_lkey:
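	/*
	 * A failed LKEY check consumes the RWQE: flush it to the
	 * receive CQ with a local protection error so the consumer
	 * sees the failure.
	 */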
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_rwq *wq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	do {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
	} while (!wr_id_only && !init_sge(qp, wqe));
	qp->r_wr_id = wqe->wr_id;
	wq->tail = tail;

	ret = 1;
	qp->r_wrid_valid = 1;
	if (handler) {
		u32 n;

		/*
		 * Validate the head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
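		/*
		 * If the number of RWQEs remaining drops below the
		 * SRQ limit, disarm the limit (it is re-armed by
		 * ib_modify_srq()) and generate the asynchronous
		 * IB_EVENT_SRQ_LIMIT_REACHED event once.
		 */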
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&rq->lock, flags);

bail:
	return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_ruc_send() to forward a WQE addressed
 * to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->r_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
		err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp = &sqp->ibqp;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(sqp, &wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
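		/*
		 * For a loopback RDMA read the SGE roles reverse:
		 * s_sge now maps the responder's (remote) memory and
		 * r_sge the requester's scatter list, so the copy
		 * loop below transfers the data in one pass.
		 */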
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.atomic.remote_addr,
					    wqe->wr.wr.atomic.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		sdata = wqe->wr.wr.atomic.swap;
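		/*
		 * The read-modify-write below is done under
		 * dev->pending_lock so it cannot interleave with other
		 * atomics serialized by the same lock.
		 */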
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}

	sge = &sqp->s_sge.sge;
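	/*
	 * Copy the payload a chunk at a time: each iteration moves
	 * min(s_len, sge->length) bytes and then advances to the next
	 * sg_list entry or the next segment of the memory region.
	 */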
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp = &sqp->ibqp;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int want_buffer(struct ipath_devdata *dd)
{
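	/* Ask the hardware to interrupt us when a PIO buffer is free. */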
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still inside the tasklet function,
	 * its tasklet_hi_schedule() call simply marks the tasklet to
	 * run again after we return.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	tasklet_unlock(&qp->s_task);
	want_buffer(dev->dd);
	dev->n_piowait++;
}

/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
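	/* The queue is full if advancing s_head would catch up to s_last. */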
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
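	/* PayLen is the number of bytes after the GRH, including the ICRC. */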
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = dev->dd->ipath_guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
		goto bail;

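	/*
	 * If the destination LID is our own, the request never goes
	 * on the wire; handle it entirely in ipath_ruc_loopback().
	 */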
	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority than sending requests. */
	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
		bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_flags);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPATH_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
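	/* The BTH PadCnt field records the extra bytes added above. */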
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
bail:
	return;
}