/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)

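/*
 * restart_sge - reposition a send SGE state to the offset implied by a PSN
 *
 * Each packet of a multi-packet request carries one pmtu of payload, so
 * the restart offset in bytes is delta_psn(psn, wqe->psn) * pmtu.
 * Returns the number of bytes still to be sent from the WQE.
 */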
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	rvt_skip_sge(ss, len, false);
	return wqe->length - len;
}

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
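		/*
		 * s_ack_queue is a circular buffer with
		 * HFI1_MAX_RDMA_ATOMIC + 1 slots; wrap the tail index here.
		 */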
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
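			/*
			 * More response packets follow; when the SDMA_AHG
			 * capability is set, middle packets can reuse a
			 * previously built header via automatic header
			 * generation.
			 */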
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}

/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses takes priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			smp_read_barrier_depends(); /* see post_one_send() */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
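	/*
	 * Within a large request, ask the responder for an ACK every
	 * HFI1_PSN_CREDIT packets so acknowledgments keep flowing before
	 * the whole message completes.
	 */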
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
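	/*
	 * RVT_S_SEND_ONE is set by hfi1_restart_rc() when a retry should
	 * emit a single ACK-requesting packet and then wait for the reply.
	 */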
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;
	struct hfi1_qp_priv *priv = qp->priv;

	/* clear the defer count */
	priv->r_adefered = 0;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
				       &qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->r_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
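	/* LRH word 0 carries SC[3:0] in bits 15:12 and the SL in bits 7:4 */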
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

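	/*
	 * ACKs are written inline to a PIO send buffer rather than queued
	 * for SDMA, so they can be issued directly from this context.
	 */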
	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
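	/*
	 * The RDMA_READ_RESPONSE_* opcodes are never generated on the
	 * request side, so they double as restart markers here; see the
	 * matching cases in hfi1_make_rc_req().
	 */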
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
		(RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
		(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_put_swqe(wqe);
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		rvt_put_swqe(wqe);
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = rcd_to_iport(rcd);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

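	/*
	 * The top three bits of the AETH syndrome select the ACK type:
	 * 0 = ACK, 1 = RNR NAK, 3 = NAK; 2 is reserved.
	 */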
	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			hfi1_restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached */
bail_stop:
	rvt_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
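		/* e.g. qp->timeout == 14 gives 4.096 usec * 16384 ~= 67 msec */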
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

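/*
 * Defer sending an ACK/NAK: queue the QP on the receive context's wait
 * list so the response goes out only after the receive queue has drained.
 */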
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

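/*
 * Cancel a deferred ACK/NAK that has not been sent yet and drop the QP
 * reference that was taken when it was queued.
 */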
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
1630
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1631
	struct rvt_ack_entry *e;
M
Mike Marciniszyn 已提交
1632 1633 1634 1635
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

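/**
 * update_ack_queue - advance s_tail_ack_queue past entry @n
 * @qp: the QP
 * @n: the index of the ack queue entry being overtaken
 *
 * The ack queue has HFI1_MAX_RDMA_ATOMIC + 1 slots, so the tail wraps
 * to 0 after HFI1_MAX_RDMA_ATOMIC.  The ack state is reset so the
 * response engine restarts from the new tail entry.
 */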
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

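/**
 * log_cca_event - record a CCA threshold event in the congestion log
 * @ppd: the port data
 * @sl: the service level of the marked flow
 * @rlid: the remote LID
 * @lqpn: the local QPN
 * @rqpn: the remote QPN
 * @svc_type: the service type of the flow
 *
 * Mark the SL in the threshold event map, bump the event counter, and
 * append a timestamped entry to the circular congestion log, all under
 * cc_log_lock.
 */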
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

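/**
 * process_becn - react to a BECN-marked packet
 * @ppd: the port data
 * @sl: the service level the marked packet arrived on
 * @rlid: the remote LID
 * @lqpn: the local QPN
 * @rqpn: the remote QPN
 * @svc_type: the service type
 *
 * Raise the CCTI for this SL (bounded by ccti_limit), reprogram the
 * inter-packet gap via set_link_ipg(), and start the per-SL CCA
 * hrtimer if it is not already running.  A congestion log event is
 * recorded once the CCTI reaches the SL's trigger threshold.
 */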
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: the packet state, which supplies the receive context,
 *	    headers, payload pointer, packet length, and QP
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret, is_fecn = 0;
	bool copy_last = false;
	u32 rkey;

	lockdep_assert_held(&qp->r_lock);
	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

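	/*
	 * Note whether this packet carried a FECN; if so, an explicit
	 * ACK is sent at the end of processing so the congestion
	 * marking can be reflected back to the sender as a BECN.
	 */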
	is_fecn = process_ecn(qp, packet, false);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
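		/*
		 * For user QPs, copy the last bytes of the payload last
		 * so a user polling the tail of the receive buffer does
		 * not see it before the rest of the data has landed.
		 */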
		copy_last = rvt_is_user_qp(qp);
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = rvt_is_user_qp(qp);
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
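		/*
		 * Either way, e->atomic_data ends up holding the prior
		 * contents of the target location: for FETCH_ADD the
		 * addend is subtracted back out of the post-add return
		 * value; for COMPARE_SWAP, cmpxchg() returns the old
		 * value whether or not the swap occurred.
		 */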
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				      get_ib_ateth_compare(ateth),
				      sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
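	/*
	 * ACKs are normally deferred (r_adefered) so that several can
	 * be coalesced into one.  The deferral is cancelled and an ACK
	 * sent immediately when no further receive work is pending,
	 * when the deferred-ACK credit (HFI1_PSN_CREDIT) is exhausted,
	 * or when a FECN must be reflected back to the sender.
	 */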
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

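/**
 * hfi1_rc_hdrerr - handle a header error on an RC packet
 * @rcd: the receive context
 * @hdr: the packet header
 * @rcv_flags: receive flags, including GRH presence
 * @qp: the QP for this packet
 *
 * For request opcodes whose PSN is at or beyond the expected PSN,
 * queue a deferred PSN-error NAK (unless one is already outstanding)
 * so the requester backs up and resends; response packets are
 * ignored here.
 */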
void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}