/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp: the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}

/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp: the QP
 * @to: timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	qp->s_flags |= RVT_S_WAIT_RNR;
	qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}

/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp: the QP
 *
 * Stop a retry timer and return whether the timer
 * was pending.
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}

/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}

/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * Stop an rnr timer and return whether the timer
 * was pending.
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}

/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}

/*
 * Only-opcode mask for adaptive pio: bit (opcode & 0x1f) is set for
 * each RC opcode that is carried in a single packet.
 */
const u32 rc_only_opcode =
	BIT(OP(SEND_ONLY) & 0x1f) |
	BIT(OP(SEND_ONLY_WITH_IMMEDIATE) & 0x1f) |
	BIT(OP(RDMA_WRITE_ONLY) & 0x1f) |
	BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & 0x1f) |
	BIT(OP(RDMA_READ_REQUEST) & 0x1f) |
	BIT(OP(ACKNOWLEDGE) & 0x1f) |
	BIT(OP(ATOMIC_ACKNOWLEDGE) & 0x1f) |
	BIT(OP(COMPARE_SWAP) & 0x1f) |
	BIT(OP(FETCH_ADD) & 0x1f);

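/**
 * restart_sge - rewind an SGE state to a given PSN within a WQE
 * @ss: the SGE state to initialize
 * @wqe: the work request to restart
 * @psn: the PSN in the middle of the request to restart from
 * @pmtu: the path MTU
 *
 * Skip over the portion of the WQE already sent (one pmtu per PSN)
 * and return the number of bytes left to (re)send.
 */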
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

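	/*
	 * Responses for RDMA read and atomic requests are queued on
	 * s_ack_queue; s_tail_ack_queue chases r_head_ack_queue as
	 * entries are sent.  Once the queue is caught up, a bare
	 * ACK/NAK is built (the "normal" case below).
	 */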
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING |
			 RVT_S_ACK_PENDING |
			 RVT_S_AHG_VALID);
	return 0;
}

/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ps: the current packet state
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* Invalidate rkey comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
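	/*
	 * On a long message, request an ACK from the responder every
	 * HFI1_PSN_CREDIT packets so the requester's acked/credit
	 * state keeps advancing while the message is sent.
	 */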
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
				       &qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->r_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send tasklet
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	this_cpu_inc(*ibp->rvp.rc_qacks);
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_rnr_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	struct ib_wc wc;
	unsigned i;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
		(RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
		(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

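/* Record the most recent ACKed/valid PSN; the caller holds the QP s_lock. */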
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	struct ib_wc wc;
	unsigned i;

	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the result returned in an atomic ACK, if any
 * @rcd: the receive context
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

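	/*
	 * The top three bits of the AETH select the ACK type:
	 * 0 = ACK, 1 = RNR NAK, 2 = reserved, 3 = NAK.
	 */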
	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
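		/*
		 * An rnr_retry_cnt of 7 means "retry forever" per IBTA,
		 * so only decrement the count for finite values.
		 */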
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to = ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
				       HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

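/*
 * Queue the QP on the receive context's wait list so the ACK or NAK
 * recorded in qp->r_nak_state is sent once the receive queue drains.
 */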
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

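/* Forget any deferred ACK state and dequeue the QP if it was waiting. */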
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

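	/*
	 * Walk the ack queue backwards from the newest entry toward the
	 * oldest one still outstanding, looking for the entry whose PSN
	 * range covers this duplicate.
	 */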
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
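		/* the BTH AckReq bit is carried in bit 31 of the psn word */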
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

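/*
 * Advance s_tail_ack_queue past entry @n, wrapping around the
 * HFI1_MAX_RDMA_ATOMIC + 1 entry ring, and reset the ack state machine
 * so a fresh response can be built.
 */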
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

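/*
 * Record a threshold congestion event for this SL in the per-port
 * circular log; once the log wraps, the oldest of the
 * OPA_CONG_LOG_ELEMS entries is overwritten.
 */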
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

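/*
 * React to a BECN for the given SL: raise the congestion control table
 * index (widening the inter-packet gap via set_link_ipg()), arm the
 * per-SL CCA timer, and log the event once the trigger threshold is hit.
 */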
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: the packet state (receive context, headers, payload, and QP)
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret, is_fecn = 0;
	int copy_last = 0;
	u32 rkey;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

	is_fecn = process_ecn(qp, packet, false);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
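		/*
		 * For user QPs, have hfi1_copy_sge() write the tail of the
		 * payload last so a consumer polling the buffer cannot see
		 * it complete before the rest of the data has landed.
		 */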
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and tries to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = 1;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
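		/*
		 * Both arms leave the pre-operation value in atomic_data:
		 * atomic64_add_return() yields the post-add value, so the
		 * addend is subtracted back out, while cmpxchg() returns
		 * the prior contents whether or not the swap happened.
		 */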
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				      get_ib_ateth_compare(ateth),
				      sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

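		/*
		 * Coalesce ACKs: defer up to HFI1_PSN_CREDIT packets, but
		 * respond immediately when the receive burst has drained,
		 * the deferral budget is spent, or a FECN must be echoed
		 * back to the sender.
		 */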
		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

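/*
 * hfi1_rc_hdrerr - handle a receive header error on an RC QP
 *
 * Only request packets are considered; an out-of-sequence request is
 * answered with a PSN-error NAK carrying the expected PSN so the
 * requester can back up and resend.
 */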
void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}