/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};
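
/*
 * Translation table from IB verbs work request opcodes to the opcode
 * written into the mlx5 WQE control segment.  Note that local invalidate,
 * REG_MR and the driver-internal UMR work request all map to the UMR
 * opcode.
 */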

static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
	u16 operation;

	u32 set_mask; /* raw_qp_set_mask_map */

	struct mlx5_rate_limit rl;

	u8 rq_q_ctr_id;
	u16 port;
};

static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

/**
 * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of it) from a user WQ
 * to a kernel buffer
 *
 * @umem: User space memory where the WQ is
 * @buffer: buffer to copy to
 * @buflen: buffer length
 * @wqe_index: index of WQE to copy from
 * @wq_offset: offset to start of WQ
 * @wq_wqe_cnt: number of WQEs in WQ
 * @wq_wqe_shift: log2 of WQE size
 * @bcnt: number of bytes to copy
 * @bytes_copied: number of bytes actually copied (return value)
 *
 * Copies at most bcnt bytes from the start of the WQE.
 * Does not guarantee that the entire WQE is copied.
 *
 * Return: zero on success, or an error code.
 */
static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
					size_t buflen, int wqe_index,
					int wq_offset, int wq_wqe_cnt,
					int wq_wqe_shift, int bcnt,
					size_t *bytes_copied)
{
	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
	size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
	size_t copy_length;
	int ret;

	/* don't copy more than requested, more than buffer length or
	 * beyond WQ end
	 */
	copy_length = min_t(u32, buflen, wq_end - offset);
	copy_length = min_t(u32, copy_length, bcnt);

	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
	if (ret)
		return ret;

	if (!ret && bytes_copied)
		*bytes_copied = copy_length;

	return 0;
}
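
/*
 * Read a send WQE from a kernel-owned (non-umem) SQ buffer.  The WQE length
 * is taken from the DS field of its control segment (in MLX5_WQE_DS_UNITS
 * units) and the copy proceeds one MLX5_SEND_WQE_BB stride at a time,
 * wrapping around the cyclic buffer via the sz_m1 mask.
 */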

static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
				      void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	size_t bytes_copied = 0;
	size_t wqe_length;
	void *p;
	int ds;

	wqe_index = wqe_index & qp->sq.fbc.sz_m1;

	/* read the control segment first */
	p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
	ctrl = p;
	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	wqe_length = ds * MLX5_WQE_DS_UNITS;

	/* read rest of WQE if it spreads over more than one stride */
	while (bytes_copied < wqe_length) {
		size_t copy_length =
			min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);

		if (!copy_length)
			break;

		memcpy(buffer + bytes_copied, p, copy_length);
		bytes_copied += copy_length;

		wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
		p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
	}
	*bc = bytes_copied;
	return 0;
}

static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
				    void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->sq;
	struct mlx5_wqe_ctrl_seg *ctrl;
	size_t bytes_copied;
	size_t bytes_copied2;
	size_t wqe_length;
	int ret;
	int ds;

	/* first, read as much as possible */
	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
					   wq->offset, wq->wqe_cnt,
					   wq->wqe_shift, buflen,
					   &bytes_copied);
	if (ret)
		return ret;

	/* we need at least control segment size to proceed */
	if (bytes_copied < sizeof(*ctrl))
		return -EINVAL;

	ctrl = buffer;
	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	wqe_length = ds * MLX5_WQE_DS_UNITS;

	/* if we copied enough then we are done */
	if (bytes_copied >= wqe_length) {
		*bc = bytes_copied;
		return 0;
	}

	/* otherwise this is a wrapped-around WQE,
	 * so read the remaining bytes starting
	 * from wqe_index 0
	 */
	ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
					   buflen - bytes_copied, 0, wq->offset,
					   wq->wqe_cnt, wq->wqe_shift,
					   wqe_length - bytes_copied,
					   &bytes_copied2);

	if (ret)
		return ret;
	*bc = bytes_copied + bytes_copied2;
	return 0;
}
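
/*
 * SQ read entry point: dispatch to the kernel-buffer or user-umem variant
 * depending on whether the send queue is backed by user memory.
 */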

int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;

	if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (!umem)
		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
						  buflen, bc);

	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
}

static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
				    void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->rq;
	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
					   wq->offset, wq->wqe_cnt,
					   wq->wqe_shift, buflen,
					   &bytes_copied);

	if (ret)
		return ret;
	*bc = bytes_copied;
	return 0;
}

int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->rq;
	size_t wqe_size = 1 << wq->wqe_shift;

	if (buflen < wqe_size)
		return -EINVAL;

	if (!umem)
		return -EOPNOTSUPP;

	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
}

static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
				     void *buffer, size_t buflen, size_t *bc)
{
	struct ib_umem *umem = srq->umem;
	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
					   srq->msrq.max, srq->msrq.wqe_shift,
					   buflen, &bytes_copied);

	if (ret)
		return ret;
	*bc = bytes_copied;
	return 0;
}

int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc)
{
	struct ib_umem *umem = srq->umem;
	size_t wqe_size = 1 << srq->msrq.wqe_shift;

	if (buflen < wqe_size)
		return -EINVAL;

	if (!umem)
		return -EOPNOTSUPP;

	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
}

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
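
/*
 * Size the receive queue.  For user QPs the WQE count and stride come from
 * the create ucmd and are only validated here; for kernel QPs the stride is
 * derived from max_recv_sge data segments (plus an optional signature
 * segment) and rounded up to a power of two.
 */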

static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
				return -EINVAL;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
				return -EINVAL;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
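
/*
 * Fixed per-WQE overhead on the send queue for a given QP type: the control
 * segment plus whatever address, atomic, UMR or datagram segments the
 * transport may need, before any data or inline segments are added.
 */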

static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg) +
			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
			    MLX5_IB_UMR_OCTOWORD);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
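
/*
 * Number of scatter/gather entries that fit in a send WQE of the given size
 * once the per-transport overhead is subtracted.  For RC and XRC_INI the
 * calculation caps the usable WQE size at 512 bytes.
 */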

static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
	int max_sge;

	if (attr->qp_type == IB_QPT_RC)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else if (attr->qp_type == IB_QPT_XRC_INI)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_xrc_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else
		max_sge = (wqe_size - sq_overhead(attr)) /
			sizeof(struct mlx5_wqe_data_seg);

	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
		     sizeof(struct mlx5_wqe_data_seg));
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = get_send_sge(attr, wqe_size);
	if (qp->sq.max_gs < attr->cap.max_send_sge)
		return -ENOMEM;

	attr->cap.max_send_sge = qp->sq.max_gs;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_ib_create_qp *ucmd,
			    struct mlx5_ib_qp_base *base,
			    struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

enum {
	/* this is the first blue flame register in the array of bfregs assigned
	 * to a process. Since we do not use it for blue flame but rather
	 * regular 64 bit doorbells, we do not need a lock for maintaining
	 * "odd/even" order
	 */
	NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_bfreg(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi)
{
	int n;

	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
	    NUM_NON_BLUE_FLAME_BFREGS;

	return n >= 0 ? n : 0;
}

static int first_med_bfreg(struct mlx5_ib_dev *dev,
			   struct mlx5_bfreg_info *bfregi)
{
	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}

static int first_hi_bfreg(struct mlx5_ib_dev *dev,
			  struct mlx5_bfreg_info *bfregi)
{
	int med;

	med = num_med_bfreg(dev, bfregi);
	return ++med;
}

static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
				  struct mlx5_bfreg_info *bfregi)
{
	int i;

	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
		if (!bfregi->count[i]) {
			bfregi->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi)
{
	int minidx = first_med_bfreg(dev, bfregi);
	int i;

	if (minidx < 0)
		return minidx;

	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
		if (bfregi->count[i] < bfregi->count[minidx])
			minidx = i;
		if (!bfregi->count[minidx])
			break;
	}

	bfregi->count[minidx]++;
	return minidx;
}
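
/*
 * Allocate a blue flame register for a new user QP: try an exclusive "high
 * class" bfreg first, then a shared "medium class" one, and fall back to
 * bfreg 0 (regular doorbells only, see NUM_NON_BLUE_FLAME_BFREGS) when
 * nothing else is available or the uverbs ABI predates version 2.
 */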

static int alloc_bfreg(struct mlx5_ib_dev *dev,
		       struct mlx5_bfreg_info *bfregi)
{
	int bfregn = -ENOMEM;

	mutex_lock(&bfregi->lock);
	if (bfregi->ver >= 2) {
		bfregn = alloc_high_class_bfreg(dev, bfregi);
		if (bfregn < 0)
			bfregn = alloc_med_class_bfreg(dev, bfregi);
	}

	if (bfregn < 0) {
		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
		bfregn = 0;
		bfregi->count[bfregn]++;
	}
	mutex_unlock(&bfregi->lock);

	return bfregn;
}

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
	mutex_lock(&bfregi->lock);
	bfregi->count[bfregn]--;
	mutex_unlock(&bfregi->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_MAX:
	default:		return -EINVAL;
	}
}

static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
			     struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
			       struct mlx5_ib_cq *recv_cq);
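
/*
 * Translate a bfreg number into the UAR index it lives in: each system page
 * holds a fixed number of bfregs, and dynamically allocated bfregs are
 * placed after the statically allocated system pages.
 */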

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg)
{
	unsigned int bfregs_per_sys_page;
	u32 index_of_sys_page;
	u32 offset;

	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
				MLX5_NON_FP_BFREGS_PER_UAR;
	index_of_sys_page = bfregn / bfregs_per_sys_page;

	if (dyn_bfreg) {
		index_of_sys_page += bfregi->num_static_sys_pages;

		if (index_of_sys_page >= bfregi->num_sys_pages)
			return -EINVAL;

		if (bfregn > bfregi->num_dyn_bfregs ||
		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
			return -EINVAL;
		}
	}

	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
	return bfregi->sys_pages[index_of_sys_page] + offset;
}

static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem, int *npages, int *page_shift,
			    int *ncont, u32 *offset)
{
	int err;

	*umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}

static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context =
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext);

	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
		atomic_dec(&dev->delay_drop.rqs_cnt);

	mlx5_ib_db_unmap_user(context, &rwq->db);
	ib_umem_release(rwq->umem);
}

static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
			  struct mlx5_ib_create_wq *ucmd)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	int page_shift = 0;
	int npages;
	u32 offset = 0;
	int ncont = 0;
	int err;

	if (!ucmd->buf_addr)
		return -EINVAL;

	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
	if (IS_ERR(rwq->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(rwq->umem);
		return err;
	}

	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
				     &rwq->rq_page_offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	rwq->rq_num_pas = ncont;
	rwq->page_shift = page_shift;
	rwq->log_page_size =  page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
		    npages, page_shift, ncont, offset);

	err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_umem;
	}

	rwq->create_type = MLX5_WQ_USER;
	return 0;

err_umem:
	ib_umem_release(rwq->umem);
	return err;
}
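
/*
 * Convert a bfreg index from the driver's "non fast-path only" numbering
 * (MLX5_NON_FP_BFREGS_PER_UAR slots per UAR) into the user-visible
 * numbering in which every UAR contributes MLX5_BFREGS_PER_UAR slots.
 */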

static int adjust_bfregn(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi, int bfregn)
{
	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
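
/*
 * Create the user-memory parts of a QP: pick a bfreg (an explicit dynamic
 * index, the cross-channel bfreg, or a freshly allocated one), validate the
 * user-supplied buffer sizes, pin the WQ buffer and fill the create_qp
 * command with its page list, doorbell mapping and UAR index.
 */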

static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct ib_qp_init_attr *attr,
			  u32 **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
			  struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
	int page_shift = 0;
	int uar_index = 0;
	int npages;
	u32 offset = 0;
	int bfregn;
	int ncont = 0;
	__be64 *pas;
	void *qpc;
	int err;
	u16 uid;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
					    ibucontext);
	if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
						ucmd.bfreg_index, true);
		if (uar_index < 0)
			return uar_index;

		bfregn = MLX5_IB_INVALID_BFREG;
	} else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
		/*
		 * TBD: should come from the verbs when we have the API
		 */
		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
		bfregn = MLX5_CROSS_CHANNEL_BFREG;
	}
	else {
		bfregn = alloc_bfreg(dev, &context->bfregi);
		if (bfregn < 0)
			return bfregn;
	}

	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
						false);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
	if (err)
		goto err_bfreg;

	if (ucmd.buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd.buf_addr;
		err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
				       ubuffer->buf_size, &ubuffer->umem,
				       &npages, &page_shift, &ncont, &offset);
		if (err)
			goto err_bfreg;
	} else {
		ubuffer->umem = NULL;
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}

	uid = (attr->qp_type != IB_QPT_XRC_TGT &&
	       attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
	MLX5_SET(create_qp_in, *in, uid, uid);
	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
	if (ubuffer->umem)
		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);

	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, page_offset, offset);

	MLX5_SET(qpc, qpc, uar_page, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
	else
		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
	qp->bfregn = bfregn;

	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	ib_umem_release(ubuffer->umem);

err_bfreg:
	if (bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
	return err;
}

static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
			    struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context =
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext);

	mlx5_ib_db_unmap_user(context, &qp->db);
	ib_umem_release(base->ubuffer.umem);

	/*
	 * Free only the BFREGs which are handled by the kernel.
	 * BFREGs of UARs allocated dynamically are handled by user.
	 */
	if (qp->bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or the SQ. Accordingly, WQE construction, which repeatedly
 * advances the write pointer, only needs to check whether it has
 * reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    u32 **in, int *inlen,
			    struct mlx5_ib_qp_base *base)
{
	int uar_index;
	void *qpc;
	int err;

	if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
					IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_NETIF_QP |
					MLX5_IB_QP_CREATE_SQPN_QP1 |
					MLX5_IB_QP_CREATE_WC_TEST))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		qp->bf.bfreg = &dev->fp_bfreg;
	else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
		qp->bf.bfreg = &dev->wc_bfreg;
	else
		qp->bf.bfreg = &dev->bfreg;

	/* We need to divide by two since each register consists of
	 * two buffers of identical size, namely odd and even
	 */
	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
	uar_index = qp->bf.bfreg->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
				       &qp->buf, dev->mdev->priv.numa_node);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (qp->rq.wqe_cnt)
		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);

	if (qp->sq.wqe_cnt) {
		int sq_strides_offset = (qp->sq.offset  & (PAGE_SIZE - 1)) /
					MLX5_SEND_WQE_BB;
		mlx5_init_fbc_offset(qp->buf.frags +
				     (qp->sq.offset / PAGE_SIZE),
				     ilog2(MLX5_SEND_WQE_BB),
				     ilog2(qp->sq.wqe_cnt),
				     sq_strides_offset, &qp->sq.fbc);

		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
	MLX5_SET(qpc, qpc, uar_page, uar_index);
	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Set "fast registration enabled" for all kernel QPs */
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);

	if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
		MLX5_SET(qpc, qpc, deth_sqpn, 1);
		qp->flags |= MLX5_IB_QP_SQPN_QP1;
	}

	mlx5_fill_page_frag_array(&qp->buf,
				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
							 *in, pas));

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
				     sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
					sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
				     sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
				       sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);

err_free:
	kvfree(*in);

err_buf:
	mlx5_frag_buf_free(dev->mdev, &qp->buf);
	return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	mlx5_db_free(dev->mdev, &qp->db);
	mlx5_frag_buf_free(dev->mdev, &qp->buf);
}

static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == MLX5_IB_QPT_DCI) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return MLX5_SRQ_RQ;
	else if (!qp->has_rq)
		return MLX5_ZERO_LEN_RQ;
	else
		return MLX5_NON_ZERO_RQ;
}

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
	    qp_type == MLX5_IB_QPT_DCI)
		return 1;

	return 0;
}

static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_qp *qp,
				    struct mlx5_ib_sq *sq, u32 tdn,
				    struct ib_pd *pd)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(tisc, tisc, transport_domain, tdn);
	if (qp->flags & MLX5_IB_QP_UNDERLAY)
		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);

	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_sq *sq, struct ib_pd *pd)
{
	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}

static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
{
	if (sq->flow_rule)
		mlx5_del_flow_rules(sq->flow_rule);
	sq->flow_rule = NULL;
}

static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				   struct ib_udata *udata,
				   struct mlx5_ib_sq *sq, void *qpin,
				   struct ib_pd *pd)
{
	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
	__be64 *pas;
	void *in;
	void *sqc;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	void *wq;
	int inlen;
	int err;
	int page_shift = 0;
	int npages;
	int ncont = 0;
	u32 offset = 0;

	err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
			       &sq->ubuffer.umem, &npages, &page_shift, &ncont,
			       &offset);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_umem;
	}

	MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, swp))
		MLX5_SET(sqc, sqc, allow_swp, 1);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
	MLX5_SET(wq, wq, log_wq_pg_sz,  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, offset);

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);

	kvfree(in);

	if (err)
		goto err_umem;

	return 0;

err_umem:
	ib_umem_release(sq->ubuffer.umem);
	sq->ubuffer.umem = NULL;

	return err;
}

static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_sq *sq)
{
	destroy_flow_rule_vport_sq(sq);
	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
	ib_umem_release(sq->ubuffer.umem);
}
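
/*
 * Bytes of PAS (physical address) entries needed to map the receive queue
 * described by @qpc: the RQ buffer holds (1 << log_rq_size) WQEs of
 * (16 << log_rq_stride) bytes each, may start page_offset quanta
 * (page_size / 64 bytes each) into the first page, and needs one 8-byte
 * PAS entry per page it touches.
 */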

static size_t get_rq_pas_size(void *qpc)
{
	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}

static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin,
				   size_t qpinlen, struct ib_pd *pd)
{
	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
	__be64 *pas;
	__be64 *qp_pas;
	void *in;
	void *rqc;
	void *wq;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	size_t rq_pas_size = get_rq_pas_size(qpc);
	size_t inlen;
	int err;

	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
		MLX5_SET(rqc, rqc, vsd, 1);
	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
		MLX5_SET(rqc, rqc, scatter_fcs, 1);

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
	memcpy(pas, qp_pas, rq_pas_size);

	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}

static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
{
	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}

static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_rq *rq,
				      u32 qp_flags_en,
				      struct ib_pd *pd)
{
	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
		mlx5_ib_disable_lb(dev, false, true);
	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
}

static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_rq *rq, u32 tdn,
				    u32 *qp_flags_en,
				    struct ib_pd *pd,
				    u32 *out, int outlen)
{
	u8 lb_flag = 0;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
	MLX5_SET(tirc, tirc, transport_domain, tdn);
	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;

	if (dev->is_rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);

	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);

	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
		err = mlx5_ib_enable_lb(dev, false, true);

		if (err)
			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
	}
	kvfree(in);

	return err;
}
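
/*
 * A RAW_PACKET QP is built from separate send and receive objects: a TIS +
 * SQ pair for the send side and an RQ + TIR pair for the receive side.  The
 * QP number reported back to the caller is taken from whichever of the two
 * exists (SQ preferred).
 */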

static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				u32 *in, size_t inlen,
				struct ib_pd *pd,
				struct ib_udata *udata,
				struct mlx5_ib_create_qp_resp *resp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	int err;
	u32 tdn = mucontext->tdn;
	u16 uid = to_mpd(pd)->uid;
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};

	if (qp->sq.wqe_cnt) {
		err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
		if (err)
			return err;

		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
		if (err)
			goto err_destroy_tis;

		if (uid) {
			resp->tisn = sq->tisn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
			resp->sqn = sq->base.mqp.qpn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
		}

		sq->base.container_mibqp = qp;
		sq->base.mqp.event = mlx5_ib_qp_event;
	}

	if (qp->rq.wqe_cnt) {
		rq->base.container_mibqp = qp;

		if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
		err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
		if (err)
			goto err_destroy_sq;

		err = create_raw_packet_qp_tir(
			dev, rq, tdn, &qp->flags_en, pd, out,
			MLX5_ST_SZ_BYTES(create_tir_out));
		if (err)
			goto err_destroy_rq;

		if (uid) {
			resp->rqn = rq->base.mqp.qpn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
			resp->tirn = rq->tirn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
				resp->tir_icm_addr = MLX5_GET(
					create_tir_out, out, icm_address_31_0);
				resp->tir_icm_addr |=
					(u64)MLX5_GET(create_tir_out, out,
						      icm_address_39_32)
					<< 32;
				resp->tir_icm_addr |=
					(u64)MLX5_GET(create_tir_out, out,
						      icm_address_63_40)
					<< 40;
				resp->comp_mask |=
					MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
			}
		}
	}

	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
						     rq->base.mqp.qpn;
	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
	if (err)
		goto err_destroy_tir;

	return 0;

err_destroy_tir:
	destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
	destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
	if (!qp->sq.wqe_cnt)
		return err;
	destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
	destroy_raw_packet_qp_tis(dev, sq, pd);

	return err;
}

static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	if (qp->rq.wqe_cnt) {
		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
		destroy_raw_packet_qp_rq(dev, rq);
	}

	if (qp->sq.wqe_cnt) {
		destroy_raw_packet_qp_sq(dev, sq);
		destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
	}
}

static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	sq->sq = &qp->sq;
	rq->rq = &qp->rq;
	sq->doorbell = &qp->db;
	rq->doorbell = &qp->db;
}

static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
		mlx5_ib_disable_lb(dev, false, true);
	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
			     to_mpd(qp->ibqp.pd)->uid);
}

static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				 struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_create_qp_resp resp = {};
	int inlen;
	int outlen;
	int err;
	u32 *in;
	u32 *out;
	void *tirc;
	void *hfso;
	u32 selected_fields = 0;
	u32 outer_l4;
	size_t min_resp_len;
	u32 tdn = mucontext->tdn;
	struct mlx5_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz;
	u8 lb_flag = 0;

	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
		return -EOPNOTSUPP;

	if (init_attr->create_flags || init_attr->send_cq)
		return -EINVAL;

	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
	if (udata->outlen < min_resp_len)
		return -EINVAL;

	required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
		mlx5_ib_dbg(dev, "invalid flags\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
	    !tunnel_offload_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
		return -EOPNOTSUPP;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
	}

	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EINVAL;
	}

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	outlen = MLX5_ST_SZ_BYTES(create_tir_out);
	in = kvzalloc(inlen + outlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	out = in + MLX5_ST_SZ_DW(create_tir_in);
	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type,
		 MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table,
		 init_attr->rwq_ind_tbl->ind_tbl_num);
	MLX5_SET(tirc, tirc, transport_domain, tdn);

	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
	else
		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	switch (ucmd.rx_hash_function) {
	case MLX5_RX_HASH_FUNC_TOEPLITZ:
	{
		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);

		if (len != ucmd.rx_key_len) {
			err = -EINVAL;
			goto err;
		}

		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
		memcpy(rss_key, ucmd.rx_hash_key, len);
		break;
	}
	default:
		err = -EOPNOTSUPP;
		goto err;
	}

	if (!ucmd.rx_hash_fields_mask) {
		/* special case when this TIR serves as steering entry without hashing */
		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
			goto create_tir;
		err = -EINVAL;
		goto err;
	}

	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
		err = -EINVAL;
		goto err;
	}

	/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);

	outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
		   ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
		   (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;

	/* Check that only one l4 protocol is set */
	if (outer_l4 & (outer_l4 - 1)) {
		err = -EINVAL;
		goto err;
	}

	/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;

	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
		selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;

	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);

create_tir:
	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);

	qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
		err = mlx5_ib_enable_lb(dev, false, true);

		if (err)
			mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
					     to_mpd(pd)->uid);
	}

	if (err)
		goto err;

	if (mucontext->devx_uid) {
		resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
		resp.tirn = qp->rss_qp.tirn;
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
			resp.tir_icm_addr =
				MLX5_GET(create_tir_out, out, icm_address_31_0);
			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
							   icm_address_39_32)
					     << 32;
			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
							   icm_address_63_40)
					     << 40;
			resp.comp_mask |=
				MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
		}
	}

	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (err)
		goto err_copy;

	kvfree(in);
	/* qpn is reserved for that QP */
	qp->trans_qp.base.mqp.qpn = 0;
	qp->flags |= MLX5_IB_QP_RSS;
	return 0;

err_copy:
	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
	kvfree(in);
	return err;
}

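/*
 * Choose the responder scatter-to-CQE mode from the receive CQ's CQE size:
 * a 128B CQE allows 64B of received data to be scattered into the CQE,
 * smaller CQEs allow 32B.  DCI QPs have no responder side, and DCTs only
 * use the 64B mode.
 */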
static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
					 void *qpc)
{
	int rcqe_sz;

	if (init_attr->qp_type == MLX5_IB_QPT_DCI)
		return;

	rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);

	if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
		if (rcqe_sz == 128)
			MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);

		return;
	}

	MLX5_SET(qpc, qpc, cs_res,
		 rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
				  MLX5_RES_SCAT_DATA32_CQE);
}

static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
					 struct ib_qp_init_attr *init_attr,
					 struct mlx5_ib_create_qp *ucmd,
					 void *qpc)
{
	enum ib_qp_type qpt = init_attr->qp_type;
	int scqe_sz;
	bool allow_scat_cqe = false;

	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
		return;

	if (ucmd)
		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;

	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
		return;

	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
	if (scqe_sz == 128) {
		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
		return;
	}

	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
}

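/*
 * Map the device's supported atomic size mask to an MLX5 atomic mode:
 * sizes above 8 bytes are encoded by their log2, smaller sizes fall back
 * to the standard 8-byte mode.
 */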
static int atomic_size_to_mode(int size_mask)
{
	/* driver does not support atomic_size > 256B
	 * and does not know how to translate bigger sizes
	 */
	int supported_size_mask = size_mask & 0x1ff;
	int log_max_size;

	if (!supported_size_mask)
		return -EOPNOTSUPP;

	log_max_size = __fls(supported_size_mask);

	if (log_max_size > 3)
		return log_max_size;

	return MLX5_ATOMIC_MODE_8B;
}

static int get_atomic_mode(struct mlx5_ib_dev *dev,
			   enum ib_qp_type qp_type)
{
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
	int atomic_mode = -EOPNOTSUPP;
	int atomic_size_mask;

	if (!atomic)
		return -EOPNOTSUPP;

	if (qp_type == MLX5_IB_QPT_DCT)
		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
	else
		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
	    (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
		atomic_mode = atomic_size_to_mode(atomic_size_mask);

	if (atomic_mode <= 0 &&
	    (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
	     atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;

	return atomic_mode;
}

static inline bool check_flags_mask(uint64_t input, uint64_t supported)
{
	return (input & ~supported) == 0;
}

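/*
 * Common creation path for all transport QP types: validate the create
 * flags against device capabilities, build the QPC (or the RQ/SQ pair for
 * raw packet QPs) and link the new QP into the device and CQ lists used by
 * the reset flow.
 */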
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_create_qp_resp resp = {};
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_cq *send_cq;
	struct mlx5_ib_cq *recv_cq;
	unsigned long flags;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_qp_base *base;
	int mlx5_st;
	void *qpc;
	u32 *in;
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	mlx5_st = to_mlx5_st(init_attr->qp_type);
	if (mlx5_st < 0)
		return -EINVAL;

	if (init_attr->rwq_ind_tbl) {
		if (!udata)
			return -ENOSYS;

		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
		return err;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->create_flags &
			(IB_QP_CREATE_CROSS_CHANNEL |
			 IB_QP_CREATE_MANAGED_SEND |
			 IB_QP_CREATE_MANAGED_RECV)) {
		if (!MLX5_CAP_GEN(mdev, cd)) {
			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
			return -EINVAL;
		}
		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
	}

	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
			return -EOPNOTSUPP;
		}

	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
			return -EOPNOTSUPP;
		}
		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
			return -EOPNOTSUPP;
		}
		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
		    (init_attr->qp_type != IB_QPT_RAW_PACKET))
			return -EOPNOTSUPP;
		qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
	}

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		if (!check_flags_mask(ucmd.flags,
				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
				      MLX5_QP_FLAG_BFREG_INDEX |
				      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
				      MLX5_QP_FLAG_SCATTER_CQE |
				      MLX5_QP_FLAG_SIGNATURE |
				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
				      MLX5_QP_FLAG_TYPE_DCI |
				      MLX5_QP_FLAG_TYPE_DCT))
			return -EINVAL;

		err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
		if (err)
			return err;

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
			qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
			    !tunnel_offload_supported(mdev)) {
				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
		}

		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
				mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
		}

		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
				mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
		}

		if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
			if (init_attr->qp_type != IB_QPT_RC ||
				!MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
				mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
				return -EOPNOTSUPP;
			}
			qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
		}

		if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
			if (init_attr->qp_type != IB_QPT_UD ||
			    (MLX5_CAP_GEN(dev->mdev, port_type) !=
			     MLX5_CAP_PORT_TYPE_IB) ||
			    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
				mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
				return -EOPNOTSUPP;
			}

			qp->flags |= MLX5_IB_QP_UNDERLAY;
			qp->underlay_qpn = init_attr->source_qpn;
		}
	} else {
		qp->wq_sig = !!wq_signature;
	}

	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, udata ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (udata) {
			__u32 max_wqes =
				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, max_wqes);
				return -EINVAL;
			}
			if (init_attr->create_flags &
			    MLX5_IB_QP_CREATE_SQPN_QP1) {
				mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
					     &resp, &inlen, base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
					       base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		}

		if (err)
			return err;
	} else {
		in = kvzalloc(inlen, GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, mlx5_st);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
	else
		MLX5_SET(qpc, qpc, latency_sensitive, 1);


	if (qp->wq_sig)
		MLX5_SET(qpc, qpc, wq_signature, 1);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		MLX5_SET(qpc, qpc, block_lb_mc, 1);

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		MLX5_SET(qpc, qpc, cd_master, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		MLX5_SET(qpc, qpc, cd_slave_send, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
	if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
		MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		configure_responder_scat_cqe(init_attr, qpc);
		configure_requester_scat_cqe(dev, init_attr,
					     udata ? &ucmd : NULL,
					     qpc);
	}

	if (qp->rq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
	}

	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

	if (qp->sq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
	} else {
		MLX5_SET(qpc, qpc, no_sq, 1);
		if (init_attr->srq &&
		    init_attr->srq->srq_type == IB_SRQT_TM)
			MLX5_SET(qpc, qpc, offload_type,
				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
	}

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

	/* 0xffffff means we ask to work with cqe version 0 */
	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
		MLX5_SET(qpc, qpc, user_index, uidx);

	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
		qp->flags |= MLX5_IB_QP_LSO;
	}
	if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto err;
		} else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
			MLX5_SET(qpc, qpc, end_padding_mode,
				 MLX5_WQ_END_PAD_MODE_ALIGN);
		} else {
			qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
		}
	}

	if (inlen < 0) {
		err = -EINVAL;
		goto err;
	}

	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
					   &resp);
	} else {
		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
	}

	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);

	base->container_mibqp = qp;
	base->mqp.event = mlx5_ib_qp_event;
	get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
		&send_cq, &recv_cq);
	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* Maintain device to QPs access, needed for further handling via reset
	 * flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling via reset flow
	 */
	if (send_cq)
		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	if (recv_cq)
		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, pd, qp, base, udata);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

err:
	kvfree(in);
	return err;
}

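/*
 * Take both CQ locks in a fixed order (lower CQN first) so concurrent
 * callers locking the same pair of CQs cannot deadlock; when one CQ is
 * missing or shared only a single lock is actually taken.
 */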
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
				spin_lock(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
				spin_unlock(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
		break;

	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 lag_tx_affinity);

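/*
 * Tear down a QP created by create_qp_common(): move it to RESET if needed,
 * unlink it from the device and CQ reset-flow lists, clean its CQEs and
 * free the work queue resources.
 */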
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			      struct ib_udata *udata)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_ib_qp_base *base;
	unsigned long flags;
	int err;

	if (qp->ibqp.rwq_ind_tbl) {
		destroy_rss_raw_qp_tir(dev, qp);
		return;
	}

	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
		qp->flags & MLX5_IB_QP_UNDERLAY) ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
			err = mlx5_core_qp_modify(dev->mdev,
						  MLX5_CMD_OP_2RST_QP, 0,
						  NULL, &base->mqp);
		} else {
			struct mlx5_modify_raw_qp_param raw_qp_param = {
				.operation = MLX5_CMD_OP_2RST_QP
			};

			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
		}
		if (err)
			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
				     base->mqp.qpn);
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	if (send_cq)
		list_del(&qp->cq_send_list);

	if (recv_cq)
		list_del(&qp->cq_recv_list);

	if (qp->create_type == MLX5_QP_KERNEL) {
		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
					   NULL);
	}
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		destroy_raw_packet_qp(dev, qp);
	} else {
		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
				     base->mqp.qpn);
	}

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	case IB_QPT_DRIVER:
		return "IB_QPT_DRIVER";
	case IB_QPT_MAX:
	default:
		return "Invalid QP type";
	}
}

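/*
 * Only the software state and the create_dct_in mailbox are prepared here;
 * the firmware DCT object is expected to be created later in the modify-QP
 * flow (mlx5_ib_destroy_dct() below only destroys the firmware object once
 * the QP has reached RTR).
 */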
static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
					struct ib_qp_init_attr *attr,
					struct mlx5_ib_create_qp *ucmd,
					struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_qp *qp;
	int err = 0;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;
	void *dctc;

	if (!attr->srq || !attr->recv_cq)
		return ERR_PTR(-EINVAL);

	err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
	if (err)
		return ERR_PTR(err);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
	if (!qp->dct.in) {
		err = -ENOMEM;
		goto err_free;
	}

	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	qp->qp_sub_type = MLX5_IB_QPT_DCT;
	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
	MLX5_SET(dctc, dctc, user_index, uidx);

	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
		configure_responder_scat_cqe(attr, dctc);

	qp->state = IB_QPS_RESET;

	return &qp->ibqp;
err_free:
	kfree(qp);
	return ERR_PTR(err);
}

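/*
 * IB_QPT_DRIVER is opaque to the RDMA core; decode the vendor flags from
 * the user command to decide whether a DCI or a DCT QP was requested.
 */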
static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
			   struct ib_qp_init_attr *init_attr,
			   struct mlx5_ib_create_qp *ucmd,
			   struct ib_udata *udata)
{
	enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
	int err;

	if (!udata)
		return -EINVAL;

	if (udata->inlen < sizeof(*ucmd)) {
		mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
		return -EINVAL;
	}
	err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
	if (err)
		return err;

	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
		init_attr->qp_type = MLX5_IB_QPT_DCI;
	} else {
		if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
			init_attr->qp_type = MLX5_IB_QPT_DCT;
		} else {
			mlx5_ib_dbg(dev, "Invalid QP flags\n");
			return -EINVAL;
		}
	}

	if (!MLX5_CAP_GEN(dev->mdev, dct)) {
		mlx5_ib_dbg(dev, "DC transport is not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *verbs_init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;
	struct ib_qp_init_attr mlx_init_attr;
	struct ib_qp_init_attr *init_attr = verbs_init_attr;
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (pd) {
		dev = to_mdev(pd->device);

		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
			if (!ucontext) {
				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
				return ERR_PTR(-EINVAL);
			} else if (!ucontext->cqe_version) {
				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
				return ERR_PTR(-EINVAL);
			}
		}
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	if (init_attr->qp_type == IB_QPT_DRIVER) {
		struct mlx5_ib_create_qp ucmd;

		init_attr = &mlx_init_attr;
		memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
		err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
		if (err)
			return ERR_PTR(err);

		if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
			if (init_attr->cap.max_recv_wr ||
			    init_attr->cap.max_recv_sge) {
				mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
				return ERR_PTR(-EINVAL);
			}
		} else {
			return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
	case MLX5_IB_QPT_DCI:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);

		qp->trans_qp.xrcdn = xrcdn;

		break;

	case IB_QPT_GSI:
		return mlx5_ib_gsi_create_qp(pd, init_attr);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_MAX:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
		qp->qp_sub_type = init_attr->qp_type;

	return &qp->ibqp;
}

static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);

	if (mqp->state == IB_QPS_RTR) {
		int err;

		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
			return err;
		}
	}

	kfree(mqp->dct.in);
	kfree(mqp);
	return 0;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_destroy_dct(mqp);

	destroy_qp_common(dev, mqp, udata);

	kfree(mqp);

	return 0;
}

static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
				const struct ib_qp_attr *attr,
				int attr_mask, __be32 *hw_access_flags_be)
{
	u8 dest_rd_atomic;
	u32 access_flags, hw_access_flags = 0;

	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
		int atomic_mode;

		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
		if (atomic_mode < 0)
			return -EOPNOTSUPP;

		hw_access_flags |= MLX5_QP_BIT_RAE;
		hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
	}

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	*hw_access_flags_be = cpu_to_be32(hw_access_flags);

	return 0;
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

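/*
 * Translate an IB static rate into the MLX5 stat_rate encoding, stepping
 * down to the nearest rate the device reports as supported; 0 means "use
 * the current port rate".
 */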
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT)
		return 0;

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
		return -EINVAL;

	while (rate != IB_RATE_PORT_CURRENT &&
	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
		--rate;

	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}

static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
				      struct mlx5_ib_sq *sq, u8 sl,
				      struct ib_pd *pd)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
					 struct mlx5_ib_sq *sq, u8 tx_affinity,
					 struct ib_pd *pd)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

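/*
 * Fill a primary or alternate mlx5_qp_path from an rdma_ah_attr: pkey
 * index, GRH/RoCE addressing, static rate and timeout.  For RoCE the SL of
 * a raw packet QP is also propagated to its TIS.
 */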
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct rdma_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr,
			 bool alt)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
	int err;
	enum ib_gid_type gid_type;
	u8 ah_flags = rdma_ah_get_ah_flags(ah);
	u8 sl = rdma_ah_get_sl(ah);

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
						     attr->pkey_index);
	if (ah_flags & IB_AH_GRH) {
		if (grh->sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       grh->sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		if (!(ah_flags & IB_AH_GRH))
			return -EINVAL;

		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
		if (qp->ibqp.qp_type == IB_QPT_RC ||
		    qp->ibqp.qp_type == IB_QPT_UC ||
		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
			path->udp_sport =
				mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
		gid_type = ah->grh.sgid_attr->gid_type;
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
	} else {
		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->fl_free_ar |=
			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
3002 3003 3004
		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
		if (ah_flags & IB_AH_GRH)
			path->grh_mlid	|= 1 << 7;
		path->dci_cfi_prio_sl = sl & 0xf;
	}

	if (ah_flags & IB_AH_GRH) {
		path->mgid_index = grh->sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;

	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  sl & 0xf, qp->ibqp.pd);

	return 0;
}

static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
					  MLX5_QP_OPTPAR_RRE            |
					  MLX5_QP_OPTPAR_RAE            |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE            |
					  MLX5_QP_OPTPAR_RAE            |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
			[MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};

static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}

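/*
 * Raw packet QPs are backed by separate RQ and SQ objects, so state
 * transitions are applied with MODIFY_RQ; the RQ counter set is retargeted
 * too when the firmware supports it.
 */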
static int modify_raw_packet_qp_rq(
	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			dev_info_once(
				&dev->ib_dev.dev,
				"RAW PACKET QP counters are not supported on current FW\n");
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}

static int modify_raw_packet_qp_sq(
	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	struct mlx5_rate_limit old_rl = ibqp->rl;
	struct mlx5_rate_limit new_rl = old_rl;
	bool new_rate_added = false;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rl = raw_qp_param->rl;
	}

	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
		if (new_rl.rate) {
			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
			if (err) {
				pr_err("Failed configuring rate limit(err %d): \
				       rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
				       err, new_rl.rate, new_rl.max_burst_sz,
				       new_rl.typical_pkt_sz);

				goto out;
			}
			new_rate_added = true;
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		/* index 0 means no limit */
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate_added)
			mlx5_rl_remove_rate(dev, &new_rl);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
	    (new_state != MLX5_SQC_STATE_RDY)) {
		mlx5_rl_remove_rate(dev, &old_rl);
		if (new_state != MLX5_SQC_STATE_RDY)
			memset(&new_rl, 0, sizeof(new_rl));
	}

	ibqp->rl = new_rl;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask ==
		    MLX5_RAW_QP_RATE_LIMIT) {
			modify_rq = 0;
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else
			return 0;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (modify_rq) {
		err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
					       qp->ibqp.pd);
		if (err)
			return err;
	}

	if (modify_sq) {
		struct mlx5_flow_handle *flow_rule;

		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity,
							    qp->ibqp.pd);
			if (err)
				return err;
		}

		flow_rule = create_flow_rule_vport_sq(dev, sq,
						      raw_qp_param->port);
		if (IS_ERR(flow_rule))
			return PTR_ERR(flow_rule);

		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
					      raw_qp_param, qp->ibqp.pd);
		if (err) {
			if (flow_rule)
				mlx5_del_flow_rules(flow_rule);
			return err;
		}

		if (flow_rule) {
			destroy_flow_rule_vport_sq(sq);
			sq->flow_rule = flow_rule;
		}

		return err;
	}

	return 0;
}

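/*
 * Spread QPs across the LAG ports by handing out tx port affinities
 * round-robin, per user context when one exists, otherwise per device port.
 */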
static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_pd *pd,
				    struct mlx5_ib_qp_base *qp_base,
				    u8 port_num, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	unsigned int tx_port_affinity;

	if (ucontext) {
		tx_port_affinity = (unsigned int)atomic_add_return(
					   1, &ucontext->tx_port_affinity) %
					   MLX5_MAX_PORTS +
				   1;
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
				tx_port_affinity, qp_base->mqp.qpn, ucontext);
	} else {
		tx_port_affinity =
			(unsigned int)atomic_add_return(
				1, &dev->port[port_num].roce.tx_port_affinity) %
				MLX5_MAX_PORTS +
			1;
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
				tx_port_affinity, qp_base->mqp.qpn);
	}

	return tx_port_affinity;
}

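/*
 * Rebind an RTS QP to a counter set with a RTS2RTS modify; without an
 * explicit rdma_counter the QP falls back to the default per-port set.
 */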
static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
				    struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_qp_context context = {};
	struct mlx5_ib_qp_base *base;
	u32 set_id;

	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
		return 0;

	if (counter)
		set_id = counter->id;
	else
		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);

	base = &mqp->trans_qp.base;
	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
	return mlx5_core_qp_modify(dev->mdev,
				   MLX5_CMD_OP_RTS2RTS_QP,
				   MLX5_QP_OPTPAR_COUNTER_SET_ID,
				   &context, &base->mqp);
}

static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state,
			       const struct mlx5_ib_modify_qp *ucmd,
			       struct ib_udata *udata)
{
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		}
	};

	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	u32 set_id = 0;
	int mlx5_st;
	int err;
	u16 op;
	u8 tx_affinity = 0;

	mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
			     qp->qp_sub_type : ibqp->qp_type);
	if (mlx5_st < 0)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	pd = get_pd(qp);
	context->flags = cpu_to_be32(mlx5_st << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
		if ((ibqp->qp_type == IB_QPT_RC) ||
		    (ibqp->qp_type == IB_QPT_UD &&
		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
		    (ibqp->qp_type == IB_QPT_UC) ||
		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
			if (dev->lag_active) {
				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
				tx_affinity = get_tx_affinity(dev, pd, base, p,
							      udata);
				context->flags |= cpu_to_be32(tx_affinity << 24);
			}
		}
	}

	if (is_sqp(ibqp->qp_type)) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if ((ibqp->qp_type == IB_QPT_UD &&
		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr, false);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
				    &context->alt_path,
				    attr->alt_port_num,
				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
				    0, attr, true);
		if (err)
			goto out;
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		__be32 access_flags;

		err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
		if (err)
			goto out;

		context->params2 |= access_flags;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
			       qp->port) - 1;

		/* Underlay port should be used - index 0 function per port */
		if (qp->flags & MLX5_IB_QP_UNDERLAY)
			port_num = 0;

		if (ibqp->counter)
			set_id = ibqp->counter->id;
		else
			set_id = mlx5_ib_get_counters_id(dev, port_num);
		context->qp_counter_set_usr_page |=
			cpu_to_be32(set_id << 24);
	}

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		context->deth_sqpn = cpu_to_be32(1);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);

	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
	    !optab[mlx5_cur][mlx5_new]) {
		err = -EINVAL;
		goto out;
	}

	op = optab[mlx5_cur][mlx5_new];
	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		struct mlx5_modify_raw_qp_param raw_qp_param = {};

		raw_qp_param.operation = op;
		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
			raw_qp_param.rq_q_ctr_id = set_id;
			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
		}

		if (attr_mask & IB_QP_PORT)
			raw_qp_param.port = attr->port_num;

		if (attr_mask & IB_QP_RATE_LIMIT) {
			raw_qp_param.rl.rate = attr->rate_limit;

			if (ucmd->burst_info.max_burst_sz) {
				if (attr->rate_limit &&
				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
					raw_qp_param.rl.max_burst_sz =
						ucmd->burst_info.max_burst_sz;
				} else {
					err = -EINVAL;
					goto out;
				}
			}

			if (ucmd->burst_info.typical_pkt_sz) {
				if (attr->rate_limit &&
				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
					raw_qp_param.rl.typical_pkt_sz =
						ucmd->burst_info.typical_pkt_sz;
				} else {
					err = -EINVAL;
					goto out;
				}
			}

			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
		}

		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
	} else {
		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
					  &base->mqp);
	}

	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->trans_qp.alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET &&
	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		if (qp->sq.wqe_cnt)
			qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

	if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
		err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
		if (!err)
			qp->counter_pending = 0;
	}

out:
	kfree(context);
	return err;
}

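/*
 * Check that @mask contains every bit in @req and no bit outside
 * @req | @opt.
 */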
static inline bool is_valid_mask(int mask, int req, int opt)
{
	if ((mask & req) != req)
		return false;

	if (mask & ~(req | opt))
		return false;

	return true;
}

/* check valid transition for driver QP types
 * for now the only QP type that this function supports is DCI
 */
static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
				enum ib_qp_attr_mask attr_mask)
{
	int req = IB_QP_STATE;
	int opt = 0;

	if (new_state == IB_QPS_RESET) {
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		req |= IB_QP_PATH_MTU;
		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
		return is_valid_mask(attr_mask, req, opt);
	}
	return false;
}

/* mlx5_ib_modify_dct: modify a DCT QP
 * valid transitions are:
 * RESET to INIT: must set access_flags, pkey_index and port
 * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
 *			   mtu, gid_index and hop_limit
 * Other transitions and attributes are illegal
 */
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	enum ib_qp_state cur_state, new_state;
	int err = 0;
	int required = IB_QP_STATE;
	void *dctc;

	if (!(attr_mask & IB_QP_STATE))
		return -EINVAL;

	cur_state = qp->state;
	new_state = attr->qp_state;

	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u16 set_id;

		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
		if (!is_valid_mask(attr_mask, required, 0))
			return -EINVAL;

		if (attr->port_num == 0 ||
		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
				    attr->port_num, dev->num_ports);
			return -EINVAL;
		}
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			MLX5_SET(dctc, dctc, rre, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			MLX5_SET(dctc, dctc, rwe, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
			int atomic_mode;

			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
			if (atomic_mode < 0)
				return -EOPNOTSUPP;

			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
			MLX5_SET(dctc, dctc, rae, 1);
		}
		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
		MLX5_SET(dctc, dctc, port, attr->port_num);

		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
		MLX5_SET(dctc, dctc, counter_set_id, set_id);

	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		struct mlx5_ib_modify_qp_resp resp = {};
		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
		u32 min_resp_len = offsetof(typeof(resp), dctn) +
				   sizeof(resp.dctn);

		if (udata->outlen < min_resp_len)
			return -EINVAL;
		resp.response_length = min_resp_len;

		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
		if (!is_valid_mask(attr_mask, required, 0))
			return -EINVAL;
		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);

		err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
					   MLX5_ST_SZ_BYTES(create_dct_in), out,
					   sizeof(out));
		if (err)
			return err;
		resp.dctn = qp->dct.mdct.mqp.qpn;
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
			return err;
		}
	} else {
		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
		return -EINVAL;
	}
	if (err)
		qp->state = IB_QPS_ERR;
	else
		qp->state = new_state;
	return err;
}

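/*
 * modify_qp verb entry point: copy and validate the user command, route
 * GSI and DCT QPs to their dedicated handlers, verify that the requested
 * transition and attribute mask are legal for this QP type, and perform
 * the transition under qp->mutex.
 */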
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_modify_qp ucmd = {};
	enum ib_qp_type qp_type;
	enum ib_qp_state cur_state, new_state;
	size_t required_cmd_sz;
	int err = -EINVAL;
	int port;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (udata && udata->inlen) {
		required_cmd_sz = offsetof(typeof(ucmd), reserved) +
			sizeof(ucmd.reserved);
		if (udata->inlen < required_cmd_sz)
			return -EINVAL;

		if (udata->inlen > sizeof(ucmd) &&
		    !ib_is_udata_cleared(udata, sizeof(ucmd),
					 udata->inlen - sizeof(ucmd)))
			return -EOPNOTSUPP;

		if (ib_copy_from_udata(&ucmd, udata,
				       min(udata->inlen, sizeof(ucmd))))
			return -EFAULT;

		if (ucmd.comp_mask ||
		    memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
		    memchr_inv(&ucmd.burst_info.reserved, 0,
			       sizeof(ucmd.burst_info.reserved)))
			return -EOPNOTSUPP;
	}

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);

	if (ibqp->qp_type == IB_QPT_DRIVER)
		qp_type = qp->qp_sub_type;
	else
		qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
			IB_QPT_GSI : ibqp->qp_type;

	if (qp_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
	}

	if (qp->flags & MLX5_IB_QP_UNDERLAY) {
		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
				    attr_mask);
			goto out;
		}
	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
		   qp_type != MLX5_IB_QPT_DCI &&
		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
				       attr_mask)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, ibqp->qp_type, attr_mask);
		goto out;
	} else if (qp_type == MLX5_IB_QPT_DCI &&
		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > dev->num_ports)) {
		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
			    attr->port_num, dev->num_ports);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len) {
			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
				    attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
			    attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
			    attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
				  new_state, &ucmd, udata);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
				   u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
 * next nearby edge and get new address translation for current WQE position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	if (likely(*seg != *cur_edge))
		return;

	_handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
}

/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
 * pointers. At the end @seg is aligned to 16B regardless of the copied size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
				   void **seg, u32 *wqe_sz, const void *src,
				   size_t n)
{
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}

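/*
 * Return nonzero if posting @nreq more work requests would overflow the
 * work queue. The head/tail difference is re-read under the CQ lock so
 * that a tail update done by completion processing is observed.
 */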
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

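/*
 * Build the Ethernet segment of a WQE: checksum offload flags and, for
 * LSO, the MSS plus an inline copy of the packet headers, which may
 * cross SQ fragment edges via memcpy_send_wqe().
 */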
static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			void **seg, int *size, void **cur_edge)
{
	struct mlx5_wqe_eth_seg *eseg = *seg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		size_t left, copysz;
		void *pdata = ud_wr->header;
		size_t stride;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/* memcpy_send_wqe should get a 16B align address. Hence, we
		 * first copy up to the current edge and then, if needed,
		 * fall-through to memcpy_send_wqe.
		 */
		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
			       left);
		memcpy(eseg->inline_hdr.start, pdata, copysz);
		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
			       sizeof(eseg->inline_hdr.start) + copysz, 16);
		*size += stride / 16;
		*seg += stride;

		if (copysz < left) {
			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
			left -= copysz;
			pdata += copysz;
			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
					left);
		}

		return;
	}

	*seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     const struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

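/*
 * Number of 16-byte octowords needed for @bytes of translation data,
 * after rounding up to the UMR XLT alignment.
 */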
static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

static __be64 frwr_mkey_mask(bool atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;

	memset(umr, 0, sizeof(*umr));

	umr->flags = flags;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask(atomic);
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
	umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(int atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}

static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
	    (mask & MLX5_MKEY_MASK_A &&
	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
		return -EPERM;
	return 0;
}

static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
			       struct mlx5_wqe_umr_ctrl_seg *umr,
			       const struct ib_send_wr *wr, int atomic)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	if (!umrwr->ignore_free_state) {
		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
			 /* fail if free */
			umr->flags = MLX5_UMR_CHECK_FREE;
		else
			/* fail if not free */
			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	}

	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		u64 offset = get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(atomic);
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;

	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
}

static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
			     struct mlx5_ib_mr *mr,
			     u32 key, int access)
{
	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;

	memset(seg, 0, sizeof(*seg));

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		seg->log2_page_size = ilog2(mr->ibmr.page_size);
	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		/* KLMs take twice the size of MTTs */
		ndescs *= 2;

	seg->flags = get_umr_flags(access) | mr->access_mode;
	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
	seg->len = cpu_to_be64(mr->ibmr.length);
	seg->xlt_oct_size = cpu_to_be32(ndescs);
}

static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}

static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
				 const struct ib_send_wr *wr)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		seg->status = MLX5_MKEY_STATUS_FREE;

	seg->flags = convert_access(umrwr->access_flags);
	if (umrwr->pd)
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
	    !umrwr->length)
		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);

	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}

static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
			     struct mlx5_ib_mr *mr,
			     struct mlx5_ib_pd *pd)
{
	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);

	dseg->addr = cpu_to_be64(mr->desc_map);
	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}

static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}

static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
			    void **wqe, int *wqe_sz, void **cur_edge)
{
	struct mlx5_wqe_inline_seg *seg;
	size_t offset;
	int inl = 0;
	int i;

	seg = *wqe;
	*wqe += sizeof(*seg);
	offset = sizeof(*seg);

	for (i = 0; i < wr->num_sge; i++) {
		size_t len  = wr->sg_list[i].length;
		void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);

		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		while (likely(len)) {
			size_t leftlen;
			size_t copysz;

			handle_post_send_edge(&qp->sq, wqe,
					      *wqe_sz + (offset >> 4),
					      cur_edge);

			leftlen = *cur_edge - *wqe;
			copysz = min_t(size_t, leftlen, len);

			memcpy(*wqe, addr, copysz);
			len -= copysz;
			addr += copysz;
			*wqe += copysz;
			offset += copysz;
		}
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*wqe_sz +=  ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}

static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}

static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}

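/*
 * Fill the Byte Stream Format (BSF) descriptor of a signature MR from
 * the verbs signature attributes: the basic section plus inline DIF
 * sections for the memory and wire domains.
 */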
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

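/*
 * Build the data layout of a signature WQE: a single KLM when there is
 * no separate protection buffer (or data and protection are interleaved
 * in memory), otherwise a strided-block list describing data and
 * protection, followed in both cases by the BSF.
 */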
static int set_sig_data_segment(const struct ib_send_wr *send_wr,
				struct ib_mr *sig_mr,
				struct ib_sig_attrs *sig_attrs,
				struct mlx5_ib_qp *qp, void **seg, int *size,
				void **cur_edge)
{
	struct mlx5_bsf *bsf;
	u32 data_len;
	u32 data_key;
	u64 data_va;
	u32 prot_len = 0;
	u32 prot_key = 0;
	u64 prot_va = 0;
	bool prot = false;
	int ret;
	int wqe_size;
	struct mlx5_ib_mr *mr = to_mmr(sig_mr);
	struct mlx5_ib_mr *pi_mr = mr->pi_mr;

	data_len = pi_mr->data_length;
	data_key = pi_mr->ibmr.lkey;
	data_va = pi_mr->data_iova;
	if (pi_mr->meta_ndescs) {
		prot_len = pi_mr->meta_length;
		prot_key = pi_mr->ibmr.lkey;
		prot_va = pi_mr->pi_iova;
		prot = true;
	}

	if (!prot || (data_key == prot_key && data_va == prot_va &&
		      data_len == prot_len)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So we need to construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So we need to construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	return 0;
}

static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_mr *sig_mr, int access_flags,
				 u32 size, u32 length, u32 pdn)
{
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 size)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}

static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
			 struct mlx5_ib_qp *qp, void **seg, int *size,
			 void **cur_edge)
{
	const struct ib_reg_wr *wr = reg_wr(send_wr);
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
	struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
	struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
	u32 pdn = get_pd(qp)->pdn;
	u32 xlt_size;
	int region_len, ret;

	if (unlikely(send_wr->num_sge != 0) ||
	    unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = pi_mr->ibmr.length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
		xlt_size = 0x30;
	else
		xlt_size = sizeof(struct mlx5_klm);

	set_sig_umr_segment(*seg, xlt_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
			     pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
				   cur_edge);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}

static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type (%d) is given.\n",
		       domain->sig_type);
		return -EINVAL;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}

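/*
 * Build a fast-registration WQE for IB_WR_REG_MR: validate that UMR may
 * be used for this registration, then emit the UMR control segment, the
 * mkey context and either an inline copy of the MR's descriptor list or
 * a data segment pointing at it.
 */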
static int set_reg_wr(struct mlx5_ib_qp *qp,
		      const struct ib_reg_wr *wr,
		      void **seg, int *size, void **cur_edge,
		      bool check_not_free)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
	u8 flags = 0;

	if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Fast update of %s for MR is disabled\n",
			     (MLX5_CAP_GEN(dev->mdev,
					   umr_modify_entity_size_disabled)) ?
				     "entity size" :
				     "atomic access");
		return -EINVAL;
	}

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	if (check_not_free)
		flags |= MLX5_UMR_CHECK_NOT_FREE;
	if (umr_inline)
		flags |= MLX5_UMR_INLINE;

	set_reg_umr_seg(*seg, mr, flags, atomic);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	if (umr_inline) {
		memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
				mr_list_size);
		*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
	} else {
		set_reg_data_seg(*seg, mr, pd);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}

static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
			void **cur_edge)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
}

static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
{
	__be32 *p = NULL;
	int i, j;

	pr_debug("dump WQE index %u:\n", idx);
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
			pr_debug("WQBB at %p:\n", (void *)p);
			j = 0;
			idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}

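/*
 * Reserve the next WQE slot on the SQ: fail if the queue is full,
 * otherwise return the slot index, its address and the current fragment
 * edge, with the control segment partially initialized.
 */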
static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		       struct mlx5_wqe_ctrl_seg **ctrl,
		       const struct ib_send_wr *wr, unsigned int *idx,
		       int *size, void **cur_edge, int nreq,
		       bool send_signaled, bool solicited)
{
	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
		return -ENOMEM;

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(solicited ? MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;
	*cur_edge = qp->sq.cur_edge;

	return 0;
}

static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     const struct ib_send_wr *wr, unsigned *idx,
		     int *size, void **cur_edge, int nreq)
{
	return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
			   wr->send_flags & IB_SEND_SIGNALED,
			   wr->send_flags & IB_SEND_SOLICITED);
}

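/*
 * Finalize a WQE: complete the control segment (opcode, WQE index, DS
 * count, fence and optional signature), record the bookkeeping used at
 * completion time, advance cur_post and cache the possibly updated SQ
 * edge.
 */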
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       void *seg, u8 size, void *cur_edge,
		       unsigned int idx, u64 wr_id, int nreq, u8 fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;

	/* We save the edge which was possibly updated during the WQE
	 * construction, into SQ's cache.
	 */
	seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
	qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
			  get_sq_edge(&qp->sq, qp->sq.cur_post &
				      (qp->sq.wqe_cnt - 1)) :
			  cur_edge;
}

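/*
 * Common post_send implementation: walk the work request chain, reserve
 * a WQE per request and emit the opcode-specific segments.
 * IB_WR_REG_MR_INTEGRITY expands into a series of UMR and SET_PSV WQEs.
 * @drain allows posting while the device is in internal error state so
 * that queues can be drained.
 */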
static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			      const struct ib_send_wr **bad_wr, bool drain)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_reg_wr reg_pi_wr;
	struct mlx5_ib_qp *qp;
	struct mlx5_ib_mr *mr;
	struct mlx5_ib_mr *pi_mr;
	struct mlx5_ib_mr pa_pi_mr;
	struct ib_sig_attrs *sig_attrs;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf;
	void *cur_edge;
	int uninitialized_var(size);
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
		     !drain)) {
		*bad_wr = wr;
		return -EIO;
	}

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);

	qp = to_mqp(ibqp);
	bf = &qp->bf;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
				nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (wr->opcode == IB_WR_REG_MR ||
		    wr->opcode == IB_WR_REG_MR_INTEGRITY) {
			fence = dev->umr_fence;
			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
		} else  {
			if (wr->send_flags & IB_SEND_FENCE) {
				if (qp->next_fence)
					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
				else
					fence = MLX5_FENCE_MODE_FENCE;
			} else {
				fence = qp->next_fence;
			}
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				set_linv_wr(qp, &seg, &size, &cur_edge);
				num_sge = 0;
				break;

			case IB_WR_REG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_MR;
				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
				err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
						 &cur_edge, true);
				if (err) {
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_MR_INTEGRITY:
				qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;

				mr = to_mmr(reg_wr(wr)->mr);
				pi_mr = mr->pi_mr;

				if (pi_mr) {
					memset(&reg_pi_wr, 0,
					       sizeof(struct ib_reg_wr));

					reg_pi_wr.mr = &pi_mr->ibmr;
					reg_pi_wr.access = reg_wr(wr)->access;
					reg_pi_wr.key = pi_mr->ibmr.rkey;

					ctrl->imm = cpu_to_be32(reg_pi_wr.key);
					/* UMR for data + prot registration */
					err = set_reg_wr(qp, &reg_pi_wr, &seg,
							 &size, &cur_edge,
							 false);
					if (err) {
						*bad_wr = wr;
						goto out;
					}
					finish_wqe(qp, ctrl, seg, size,
						   cur_edge, idx, wr->wr_id,
						   nreq, fence,
						   MLX5_OPCODE_UMR);

					err = begin_wqe(qp, &seg, &ctrl, wr,
							&idx, &size, &cur_edge,
							nreq);
					if (err) {
						mlx5_ib_warn(dev, "\n");
						err = -ENOMEM;
						*bad_wr = wr;
						goto out;
					}
				} else {
					memset(&pa_pi_mr, 0,
					       sizeof(struct mlx5_ib_mr));
					/* No UMR, use local_dma_lkey */
					pa_pi_mr.ibmr.lkey =
						mr->ibmr.pd->local_dma_lkey;

					pa_pi_mr.ndescs = mr->ndescs;
					pa_pi_mr.data_length = mr->data_length;
					pa_pi_mr.data_iova = mr->data_iova;
					if (mr->meta_ndescs) {
						pa_pi_mr.meta_ndescs =
							mr->meta_ndescs;
						pa_pi_mr.meta_length =
							mr->meta_length;
						pa_pi_mr.pi_iova = mr->pi_iova;
					}

					pa_pi_mr.ibmr.length = mr->ibmr.length;
					mr->pi_mr = &pa_pi_mr;
				}
				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				/* UMR for sig MR */
				err = set_pi_umr_wr(wr, qp, &seg, &size,
						    &cur_edge);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
					   wr->wr_id, nreq, fence,
					   MLX5_OPCODE_UMR);

				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				sig_attrs = mr->ibmr.sig_attrs;
				err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
						  &size, &cur_edge, nreq, false,
						  true);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}
				err = set_psv_wr(&sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx,
						 &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
					   wr->wr_id, nreq, next_fence,
					   MLX5_OPCODE_SET_PSV);

				err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
						  &size, &cur_edge, nreq, false,
						  true);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}
				err = set_psv_wr(&sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx,
						 &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
					   wr->wr_id, nreq, next_fence,
					   MLX5_OPCODE_SET_PSV);

				qp->next_fence =
					MLX5_FENCE_MODE_INITIATOR_SMALL;
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				seg  += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
				mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
				err = -EPERM;
				*bad_wr = wr;
				goto out;
			}
			/* fall through */
		case MLX5_IB_QPT_HW_GSI:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);

			break;
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);

			/* handle qp that supports ud offload */
			if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
				struct mlx5_wqe_eth_pad *pad;

				pad = seg;
				memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
				seg += sizeof(struct mlx5_wqe_eth_pad);
				size += sizeof(struct mlx5_wqe_eth_pad) / 16;
				set_eth_seg(wr, qp, &seg, &size, &cur_edge);
				handle_post_send_edge(&qp->sq, &seg, size,
						      &cur_edge);
			}
			break;
		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
			err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
			if (unlikely(err))
				goto out;
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
		} else {
			for (i = 0; i < num_sge; i++) {
				handle_post_send_edge(&qp->sq, &seg, size,
						      &cur_edge);
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg
					((struct mlx5_wqe_data_seg *)seg,
					 wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					seg += sizeof(struct mlx5_wqe_data_seg);
				}
			}
		}

		qp->next_fence = next_fence;
		finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
			   fence, mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
		/* Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		bf->offset ^= bf->buf_size;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}

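/*
 * Post a list of receive WRs to the RQ.  GSI QPs are handed off to the
 * GSI helper; for all other QP types the SGEs are written into the RQ
 * ring under rq.lock and the doorbell record is updated once at the end.
 */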
static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
		     !drain)) {
		*bad_wr = wr;
		return -EIO;
	}

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

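/* Translate a firmware mlx5_qp_path into an rdma_ah_attr for QP query. */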
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr,
			    struct mlx5_qp_path *path)
{

	memset(ah_attr, 0, sizeof(*ah_attr));

	if (!path->port || path->port > ibdev->num_ports)
		return;

	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);

	rdma_ah_set_port_num(ah_attr, path->port);
	rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				path->static_rate ? path->static_rate - 5 : 0);
	if (path->grh_mlid & (1 << 7)) {
		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);

		rdma_ah_set_grh(ah_attr, NULL,
				tc_fl & 0xfffff,
				path->mgid_index,
				path->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}

static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_sq *sq,
					u8 *sq_state)
{
	int err;

	err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
	if (err)
		goto out;
	sq->state = *sq_state;

out:
	return err;
}

static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_rq *rq,
					u8 *rq_state)
{
	void *out;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
	out = kvzalloc(inlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
	if (err)
		goto out;

	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
	*rq_state = MLX5_GET(rqc, rqc, state);
	rq->state = *rq_state;

out:
	kvfree(out);
	return err;
}

static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
				  struct mlx5_ib_qp *qp, u8 *qp_state)
{
	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
		[MLX5_RQC_STATE_RST] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
		},
		[MLX5_RQC_STATE_RDY] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
		},
		[MLX5_RQC_STATE_ERR] = {
			[MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
		},
		[MLX5_RQ_STATE_NA] = {
			[MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
		},
	};

	*qp_state = sqrq_trans[rq_state][sq_state];

	if (*qp_state == MLX5_QP_STATE_BAD) {
		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
		return -EINVAL;
	}

	if (*qp_state == MLX5_QP_STATE)
		*qp_state = qp->state;

	return 0;
}

static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_qp *qp,
				     u8 *raw_packet_qp_state)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	int err;
	u8 sq_state = MLX5_SQ_STATE_NA;
	u8 rq_state = MLX5_RQ_STATE_NA;

	if (qp->sq.wqe_cnt) {
		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
		if (err)
			return err;
	}

	if (qp->rq.wqe_cnt) {
		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
		if (err)
			return err;
	}

	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
				      raw_packet_qp_state);
}

static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_attr *qp_attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *context;
	int mlx5_state;
	u32 *outb;
	int err = 0;

	outb = kzalloc(outlen, GFP_KERNEL);
	if (!outb)
		return -ENOMEM;

	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
				 outlen);
	if (err)
		goto out;

	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mlx5_state);
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be16_to_cpu(context->alt_path.pkey_index);
		qp_attr->alt_port_num	=
			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
	}

	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;

out:
	kfree(outb);
	return err;
}

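/*
 * Query a DCT QP.  Only a subset of the QP attributes is supported and
 * the DCT must be in the RTR state.
 */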
static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
				struct ib_qp_attr *qp_attr, int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_core_dct	*dct = &mqp->dct.mdct;
	u32 *out;
	u32 access_flags = 0;
	int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
	void *dctc;
	int err;
	int supported_mask = IB_QP_STATE |
			     IB_QP_ACCESS_FLAGS |
			     IB_QP_PORT |
			     IB_QP_MIN_RNR_TIMER |
			     IB_QP_AV |
			     IB_QP_PATH_MTU |
			     IB_QP_PKEY_INDEX;

	if (qp_attr_mask & ~supported_mask)
		return -EINVAL;
	if (mqp->state != IB_QPS_RTR)
		return -EINVAL;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
	if (err)
		goto out;

	dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);

	if (qp_attr_mask & IB_QP_STATE)
		qp_attr->qp_state = IB_QPS_RTR;

	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		if (MLX5_GET(dctc, dctc, rre))
			access_flags |= IB_ACCESS_REMOTE_READ;
		if (MLX5_GET(dctc, dctc, rwe))
			access_flags |= IB_ACCESS_REMOTE_WRITE;
		if (MLX5_GET(dctc, dctc, rae))
			access_flags |= IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->qp_access_flags = access_flags;
	}

	if (qp_attr_mask & IB_QP_PORT)
		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
	if (qp_attr_mask & IB_QP_AV) {
		qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
		qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
		qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
		qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
	}
	if (qp_attr_mask & IB_QP_PATH_MTU)
		qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
out:
	kfree(out);
	return err;
}

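/*
 * ib_query_qp() entry point: GSI and DCT QPs are dispatched to their
 * helpers, raw packet QPs derive their state from the SQ/RQ contexts,
 * and all other QP types are queried directly from firmware.
 */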
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int err = 0;
	u8 raw_packet_qp_state;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
					    qp_init_attr);

	/* Not all of output fields are applicable, make sure to zero them */
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
	memset(qp_attr, 0, sizeof(*qp_attr));

	if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
		return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
					    qp_attr_mask, qp_init_attr);

	mutex_lock(&qp->mutex);

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & MLX5_IB_QP_UNDERLAY) {
		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
		if (err)
			goto out;
		qp->state = raw_packet_qp_state;
		qp_attr->port_num = 1;
	} else {
		err = query_qp_attr(dev, qp, qp_attr);
		if (err)
			goto out;
	}

	qp_attr->qp_state	     = qp->state;
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr  = qp->sq.max_post;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
		qp_init_attr->qp_context = ibqp->qp_context;
	} else {
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
		qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
	if (err)
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);

	kfree(xrcd);
	return 0;
}

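/* Forward WQ async events from mlx5_core to the registered event handler. */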
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device     = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		switch (type) {
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_WQ_FATAL;
			break;
		default:
			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
			return;
		}

		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
}

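/*
 * Enable the device-wide delay-drop timeout on first use and count the
 * RQs that depend on it.
 */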
static int set_delay_drop(struct mlx5_ib_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->delay_drop.lock);
	if (dev->delay_drop.activate)
		goto out;

	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
	if (err)
		goto out;

	dev->delay_drop.activate = true;
out:
	mutex_unlock(&dev->delay_drop.lock);

	if (!err)
		atomic_inc(&dev->delay_drop.rqs_cnt);
	return err;
}

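/*
 * Build and execute the CREATE_RQ command for a user WQ, including the
 * optional striding RQ, end padding, VLAN stripping, scatter FCS and
 * delay-drop settings.
 */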
static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
		      struct ib_wq_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev;
	int has_net_offloads;
	__be64 *rq_pas0;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	dev = to_mdev(pd->device);

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc,  rqc, mem_rq_type,
		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
	MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
	MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type,
		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		} else {
			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
		}
	}
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
		/*
		 * In Firmware number of strides in each WQE is:
		 *   "512 * 2^single_wqe_log_num_of_strides"
		 * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
		 * accepted as 0 to 9
		 */
		static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
					     2,  3,  4,  5,  6,  7,  8, 9 };
		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 rwq->single_stride_log_num_of_bytes -
			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 fw_map[rwq->log_num_strides -
				MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
	}
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	} else {
		MLX5_SET(rqc, rqc, vsd, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, scatter_fcs, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		if (!(dev->ib_dev.attrs.raw_packet_caps &
		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, delay_drop_en, 1);
	}
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		err = set_delay_drop(dev);
		if (err) {
			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
				     err);
			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
		} else {
			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
		}
	}
out:
	kvfree(in);
	return err;
}

static int set_user_rq_size(struct mlx5_ib_dev *dev,
			    struct ib_wq_init_attr *wq_init_attr,
			    struct mlx5_ib_create_wq *ucmd,
			    struct mlx5_ib_rwq *rwq)
{
	/* Sanity check RQ size before proceeding */
	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
		return -EINVAL;

	if (!ucmd->rq_wqe_count)
		return -EINVAL;

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
		return -EINVAL;

	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
}

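/* Validate a log_num_strides value against the device's supported range. */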
static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
{
	if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
	    (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
		return false;

	if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
	    (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
		return false;

	return true;
}

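/*
 * Parse and validate the user's create_wq command, then size the RQ and
 * register the user buffer backing it.
 */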
static int prepare_user_rq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr,
			   struct ib_udata *udata,
			   struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_wq ucmd = {};
	int err;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
		+ sizeof(ucmd.single_stride_log_num_of_bytes);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
			return -EOPNOTSUPP;
		}
		if ((ucmd.single_stride_log_num_of_bytes <
		    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
		    (ucmd.single_stride_log_num_of_bytes >
		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
			mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
				    ucmd.single_stride_log_num_of_bytes,
				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
			return -EINVAL;
		}
		if (!log_of_strides_valid(dev,
					  ucmd.single_wqe_log_num_of_strides)) {
			mlx5_ib_dbg(
				dev,
				"Invalid log num strides (%u. Range is %u - %u)\n",
				ucmd.single_wqe_log_num_of_strides,
				MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
			return -EINVAL;
		}
		rwq->single_stride_log_num_of_bytes =
			ucmd.single_stride_log_num_of_bytes;
		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
	}

	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	err = create_user_rq(dev, pd, udata, rwq, &ucmd);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	rwq->user_index = ucmd.user_index;
	return 0;
}

struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_rwq *rwq;
	struct mlx5_ib_create_wq_resp resp = {};
	size_t min_resp_len;
	int err;

	if (!udata)
		return ERR_PTR(-ENOSYS);

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	dev = to_mdev(pd->device);
	switch (init_attr->wq_type) {
	case IB_WQT_RQ:
		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
		if (!rwq)
			return ERR_PTR(-ENOMEM);
		err = prepare_user_rq(pd, init_attr, udata, rwq);
		if (err)
			goto err;
		err = create_rq(rwq, pd, init_attr);
		if (err)
			goto err_user_rq;
		break;
	default:
		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
			    init_attr->wq_type);
		return ERR_PTR(-EINVAL);
	}

	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
				sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_copy:
	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
	destroy_user_rq(dev, pd, rwq, udata);
err:
	kfree(rwq);
	return ERR_PTR(err);
}

void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);

	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
	destroy_user_rq(dev, wq->pd, rwq, udata);
	kfree(rwq);
}

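/*
 * Create an RQ indirection table (RQT), used to spread incoming traffic
 * across a set of WQs (e.g. for RSS).
 */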
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
	int sz = 1 << init_attr->log_ind_tbl_size;
	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
	size_t min_resp_len;
	int inlen;
	int err;
	int i;
	u32 *in;
	void *rqtc;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->log_ind_tbl_size >
	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
			    init_attr->log_ind_tbl_size,
			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
		return ERR_PTR(-EINVAL);
	}

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
	if (!rwq_ind_tbl)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err;
	}

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

	rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
	MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);

	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
	kvfree(in);

	if (err)
		goto err;

	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
					sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	return &rwq_ind_tbl->ib_rwq_ind_tbl;

err_copy:
	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
err:
	kfree(rwq_ind_tbl);
	return ERR_PTR(err);
}

int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);

	kfree(rwq_ind_tbl);
	return 0;
}

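/*
 * Modify a WQ's state and flags.  When the RQ moves from RESET to RDY a
 * counter set id is also attached if the firmware supports it.
 */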
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
	int wq_state;
	int inlen;
	int err;
	void *rqc;
	void *in;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
		wq_attr->curr_wq_state : wq->state;
	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
		wq_attr->wq_state : curr_wq_state;
	if (curr_wq_state == IB_WQS_ERR)
		curr_wq_state = MLX5_RQC_STATE_ERR;
	if (wq_state == IB_WQS_ERR)
		wq_state = MLX5_RQC_STATE_ERR;
	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
	MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
	MLX5_SET(rqc, rqc, state, wq_state);

	if (wq_attr_mask & IB_WQ_FLAGS) {
		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
				mlx5_ib_dbg(dev, "VLAN offloads are not "
					    "supported\n");
				err = -EOPNOTSUPP;
				goto out;
			}
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
			MLX5_SET(rqc, rqc, vsd,
				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
		}

		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
		u16 set_id;

		set_id = mlx5_ib_get_counters_id(dev, 0);
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, set_id);
		} else
			dev_info_once(
				&dev->ib_dev.dev,
				"Receive WQ counters are not supported on current FW\n");
	}

	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
	if (!err)
		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;

out:
	kvfree(in);
	return err;
}

struct mlx5_ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
						     struct mlx5_ib_drain_cqe,
						     cqe);

	complete(&cqe->done);
}

/* This function returns only once the drained WR was completed */
static void handle_drain_completion(struct ib_cq *cq,
				    struct mlx5_ib_drain_cqe *sdrain,
				    struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (cq->poll_ctx == IB_POLL_DIRECT) {
		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
		return;
	}

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		struct mlx5_ib_cq *mcq = to_mcq(cq);
		bool triggered = false;
		unsigned long flags;

		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
		/* Make sure that the CQ handler won't run if wasn't run yet */
		if (!mcq->mcq.reset_notify_added)
			mcq->mcq.reset_notify_added = 1;
		else
			triggered = true;
		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

		if (triggered) {
			/* Wait for any scheduled/running task to be ended */
			switch (cq->poll_ctx) {
			case IB_POLL_SOFTIRQ:
				irq_poll_disable(&cq->iop);
				irq_poll_enable(&cq->iop);
				break;
			case IB_POLL_WORKQUEUE:
				cancel_work_sync(&cq->work);
				break;
			default:
				WARN_ON_ONCE(1);
			}
		}

		/* Run the CQ handler - this makes sure that the drain WR will
		 * be processed if wasn't processed yet.
		 */
		mcq->mcq.comp(&mcq->mcq, NULL);
	}

	wait_for_completion(&sdrain->done);
}

void mlx5_ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe sdrain;
	const struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &sdrain, dev);
}

void mlx5_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}

/**
 * mlx5_ib_qp_set_counter - Bind a QP to a counter. If @counter is NULL
 * then bind the QP to the default counter
 */
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	int err = 0;

	mutex_lock(&mqp->mutex);
	if (mqp->state == IB_QPS_RESET) {
		qp->counter = counter;
		goto out;
	}

	if (mqp->state == IB_QPS_RTS) {
		err = __mlx5_ib_qp_set_counter(qp, counter);
		if (!err)
			qp->counter = counter;

		goto out;
	}

	mqp->counter_pending = 1;
	qp->counter = counter;

out:
	mutex_unlock(&mqp->mutex);
	return err;
}