/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

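/*
 * Return nonzero when device-managed flow steering (DMFS) can be used:
 * the HCA must be in DMFS mode and each link type present must have the
 * matching steering capability bit. DMFS with IB ports is additionally
 * unavailable in multi-function mode.
 */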
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

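/*
 * get_netdev callback: return the Ethernet net_device backing @port_num.
 * When the ports are bonded, resolve to the currently active slave.
 * The returned device is held and must be released with dev_put().
 */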
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

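/*
 * Push the software GID table to the device with SET_PORT, using the
 * RoCE v1-only layout or the combined v1/v2 layout depending on device
 * capabilities.
 */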
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

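/*
 * add_gid callback: cache the GID in the per-port table (reusing an
 * existing entry of the same GID and type when possible) and, if a new
 * slot was written, push the whole table to hardware.
 */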
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type))  {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

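/*
 * Translate a GID index from the core GID cache into the index actually
 * programmed in the hardware GID table of this port.
 */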
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table   *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

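/*
 * query_device: fill in device attributes from the NODE_INFO MAD and
 * cached device capabilities; optionally report the HCA core clock
 * offset through the extended uverbs response.
 */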
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_sge_rd	   = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
				in_mad, out_mad);
	if (err)
		goto out;


	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		 props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

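/*
 * Build IB port attributes for a RoCE (Ethernet) port from QUERY_PORT
 * output and the state of the underlying net_device.
 */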
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
				eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

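/*
 * Read the SL-to-VL mapping for @port via an SL_TO_VL_TABLE MAD and pack
 * the eight entries into a single u64 (slaves simply report zero).
 */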
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void  mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page, and that there will
	 * be no "splitting" operations on it.
	 */
	area->vm_ops = NULL;
}

static void  mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting.  The exiting case is handled explicitly as part
	 * of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that task is dead before returning, it may
			 * prevent a rare case of module down in parallel to a
			 * call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			msleep(1);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_read(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops =  &mlx4_ib_vm_ops;
}

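/*
 * mmap offsets: page 0 maps the UAR doorbell page, page 1 the blue-flame
 * registers (when available) and page 3 the internal clock page. Each
 * region may be mapped only once per ucontext.
 */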
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))

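/*
 * Translate one verbs flow specification into the hardware rule layout in
 * @mlx4_spec; returns the rule size in bytes or a negative errno.
 */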
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;


	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}

static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}

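/*
 * Build a complete steering rule (control segment, default rules and all
 * user specs) in a command mailbox and attach it with
 * MLX4_QP_FLOW_STEERING_ATTACH; the firmware handle is returned in @reg_id.
 */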
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
			  int domain,
			  enum mlx4_net_trans_promisc_mode flow_type,
			  u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}

static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}

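/*
 * Translate a "don't trap" flow attribute into sniffer rule types: with no
 * specs both MC and UC sniffer rules are requested; with a single Ethernet
 * spec the destination MAC mask (its multicast bit) selects MC or UC.
 */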
static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if the mask is all zeros, sniff both MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* The XOR above touched only the multicast bit, so a
			 * non-empty mask is valid only if that bit is set and
			 * the rest are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}

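/*
 * ib_create_flow verb: map the IB flow attribute onto one or more hardware
 * rules (regular, default, sniffer or don't-trap) and, on a bonded device,
 * mirror each rule on the second port so the single port seen by the
 * application keeps working across failover.
 */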
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
				    struct ib_flow_attr *flow_attr,
				    int domain)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If the dont-trap flag (continue match) is set, then under
		 * specific conditions traffic is replicated to the given QP
		 * without being stolen from it.
		 */
		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
			err = mlx4_ib_add_dont_trap_rule(dev,
							 flow_attr,
							 type);
			if (err)
				goto err_free;
		} else {
			type[0] = MLX4_FS_REGULAR;
		}
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_MIRROR_RX_PORT;
		type[1] = MLX4_FS_MIRROR_SX_PORT;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    domain, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}

		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;

		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}

	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}

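/*
 * ib_destroy_flow verb: detach every registered rule (and its bonding
 * mirror) attached to this flow, then free the tracking structure.
 */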
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}

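/*
 * ib_attach_mcast verb: attach the QP to a multicast group.  Under
 * device-managed steering the returned registration id is kept on the QP's
 * steering_rules list so it can be found again at detach time; on a bonded
 * device the attach is repeated on the other port.
 */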
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev	*dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id	reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

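/*
 * ib_detach_mcast verb: reverse of mlx4_ib_mcg_attach().  Looks up the
 * registration id saved at attach time (for device-managed steering),
 * detaches from firmware on both ports if bonded, and drops the cached
 * GID entry.
 */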
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;

	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

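/*
 * Query NodeDescription and NodeInfo through the MAD interface to fill in
 * the node description, node GUID and hardware revision id.
 */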
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

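/*
 * Diagnostic counters exported through the rdma_hw_stats interface.  Each
 * entry maps a counter name to its offset within the firmware's
 * transport/CI error query output.
 */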
struct diag_counter {
	const char *name;
	u32 offset;
};

#define DIAG_COUNTER(_name, _offset)			\
	{ .name = #_name, .offset = _offset }

static const struct diag_counter diag_basic[] = {
	DIAG_COUNTER(rq_num_lle, 0x00),
	DIAG_COUNTER(sq_num_lle, 0x04),
	DIAG_COUNTER(rq_num_lqpoe, 0x08),
	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
	DIAG_COUNTER(rq_num_lpe, 0x18),
	DIAG_COUNTER(sq_num_lpe, 0x1C),
	DIAG_COUNTER(rq_num_wrfe, 0x20),
	DIAG_COUNTER(sq_num_wrfe, 0x24),
	DIAG_COUNTER(sq_num_mwbe, 0x2C),
	DIAG_COUNTER(sq_num_bre, 0x34),
	DIAG_COUNTER(sq_num_rire, 0x44),
	DIAG_COUNTER(rq_num_rire, 0x48),
	DIAG_COUNTER(sq_num_rae, 0x4C),
	DIAG_COUNTER(rq_num_rae, 0x50),
	DIAG_COUNTER(sq_num_roe, 0x54),
	DIAG_COUNTER(sq_num_tree, 0x5C),
	DIAG_COUNTER(sq_num_rree, 0x64),
	DIAG_COUNTER(rq_num_rnr, 0x68),
	DIAG_COUNTER(sq_num_rnr, 0x6C),
	DIAG_COUNTER(rq_num_oos, 0x100),
	DIAG_COUNTER(sq_num_oos, 0x104),
};

static const struct diag_counter diag_ext[] = {
	DIAG_COUNTER(rq_num_dup, 0x130),
	DIAG_COUNTER(sq_num_to, 0x134),
};

static const struct diag_counter diag_device_only[] = {
	DIAG_COUNTER(num_cqovf, 0x1A0),
	DIAG_COUNTER(rq_num_udsdprd, 0x118),
};

static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;

	if (!diag[!!port_num].name)
		return NULL;

	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
					  diag[!!port_num].num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
	u32 hw_value[ARRAY_SIZE(diag_device_only) +
		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
	int ret;
	int i;

	ret = mlx4_query_diag_counters(dev->dev,
				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
				       diag[!!port].offset, hw_value,
				       diag[!!port].num_counters, port);

	if (ret)
		return ret;

	for (i = 0; i < diag[!!port].num_counters; i++)
		stats->value[i] = hw_value[i];

	return diag[!!port].num_counters;
}

static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
					 const char ***name,
					 u32 **offset,
					 u32 *num,
					 bool port)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(diag_basic);

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
		num_counters += ARRAY_SIZE(diag_ext);

	if (!port)
		num_counters += ARRAY_SIZE(diag_device_only);

	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
	if (!*name)
		return -ENOMEM;

	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
	if (!*offset)
		goto err_name;

	*num = num_counters;

	return 0;

err_name:
	kfree(*name);
	return -ENOMEM;
}

static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
				       const char **name,
				       u32 *offset,
				       bool port)
{
	int i;
	int j;

	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
		name[i] = diag_basic[i].name;
		offset[i] = diag_basic[i].offset;
	}

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
			name[j] = diag_ext[i].name;
			offset[j] = diag_ext[i].offset;
		}
	}

	if (!port) {
		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
			name[j] = diag_device_only[i].name;
			offset[j] = diag_device_only[i].offset;
		}
	}
}

static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
	int i;
	int ret;
	bool per_port = !!(ibdev->dev->caps.flags2 &
		MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);

	if (mlx4_is_slave(ibdev->dev))
		return 0;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		/* i == 1 means we are building port counters */
		if (i && !per_port)
			continue;

		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
						    &diag[i].offset,
						    &diag[i].num_counters, i);
		if (ret)
			goto err_alloc;

		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
					   diag[i].offset, i);
	}

	ibdev->ib_dev.get_hw_stats	= mlx4_ib_get_hw_stats;
	ibdev->ib_dev.alloc_hw_stats	= mlx4_ib_alloc_hw_stats;

	return 0;

err_alloc:
	if (i) {
		kfree(diag[i - 1].name);
		kfree(diag[i - 1].offset);
	}

	return ret;
}

static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
{
	int i;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		kfree(ibdev->diag_counters[i].offset);
		kfree(ibdev->diag_counters[i].name);
	}
}

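/*
 * Propagate a netdev MAC change to the proxy QP1 of the given port: cache
 * the new source MAC and, under SR-IOV, register it with the device and
 * update the QP, releasing whichever MAC is no longer needed.
 */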
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need to update QP1 or register a MAC in non-SRIOV mode */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if the old port was zero, no MAC was registered yet for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}

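/*
 * Refresh the cached net_device pointers for all Ethernet ports and, if the
 * event concerns one of them (address change, register, up or change),
 * trigger an SMAC update for that port's QP1 proxy.
 */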
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)

{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	ASSERT_RTNL();

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

	}
	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}

static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}

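/*
 * On the master, build the virtual-to-physical pkey mapping for every slave
 * (identity mapping for the master itself and for index 0, all other entries
 * pointing at the table's last pkey) and seed the physical pkey cache.
 */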
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port-1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}

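/*
 * Carve out per-port completion EQs from the device and advertise however
 * many were successfully assigned as this ib_device's completion vectors.
 */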
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;

	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
		     j++, total_eqs++) {
			if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
				continue;
			ibdev->eq_table[eq] = total_eqs;
			if (!mlx4_assign_eq(dev, i,
					    &ibdev->eq_table[eq]))
				eq++;
			else
				ibdev->eq_table[eq] = -1;
		}
	}

	for (i = eq; i < dev->caps.num_comp_vectors;
	     ibdev->eq_table[i++] = -1)
		;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = eq;
}

static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;

	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;

	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);

	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}

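/*
 * Report the immutable per-port attributes: pkey/gid table sizes, maximum
 * MAD size, and core capability flags chosen from the port's link layer
 * (IB, RoCE, or RoCE v1/v2).
 */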
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	int err;

	err = mlx4_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	} else {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	}

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_fw_ver_str(struct ib_device *device, char *str,
			   size_t str_len)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev);
	snprintf(str, str_len, "%d.%d.%d",
		 (int) (dev->dev->caps.fw_ver >> 32),
		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dev->caps.fw_ver & 0xffff);
}

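/*
 * mlx4 core "add" callback: allocate and register the ib_device for a newly
 * probed HCA.  Sets up the internal PD/UAR, the verb tables and uverbs
 * command masks, EQs, counters, steering resources, diagnostic counters,
 * SR-IOV support and the netdev notifier, rolling everything back on failure.
 */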
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;
	struct counter_index *new_counter_index = NULL;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	ibdev->bond_next_port	= 0;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->persist->pdev->dev;
	ibdev->ib_dev.get_netdev	= mlx4_ib_get_netdev;
	ibdev->ib_dev.add_gid		= mlx4_ib_add_gid;
	ibdev->ib_dev.del_gid		= mlx4_ib_del_gid;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr	= mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_mr		= mlx4_ib_alloc_mr;
	ibdev->ib_dev.map_mr_sg		= mlx4_ib_map_mr_sg;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
	ibdev->ib_dev.get_dev_fw_str    = get_fw_ver_str;
	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;

	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;

		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	ibdev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;
	mlx4_init_sl2vl_tbl(ibdev);

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
	}

	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if allocating a new counter failed, use the default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports ; ++i) {
			new_counter_index =
					kmalloc(sizeof(struct counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
					dev, ibdev->steer_qpn_base,
					ibdev->steer_qpn_base +
					ibdev->steer_qpn_count - 1);
			if (err)
				goto err_steer_free_bitmap;
		} else {
			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
		}
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (mlx4_ib_alloc_diag_counters(ibdev))
		goto err_steer_free_bitmap;

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_diag_counters;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
			err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
			if (err) {
				goto err_notif;
			}
		}
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
					 &ibdev->ib_dev);

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_diag_counters:
	mlx4_ib_diag_cleanup(ibdev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

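/*
 * Helpers for the UC QP range reserved for device-managed steering:
 * allocate and free QP numbers from the ib_uc_qpns_bitmap, and register (or
 * unregister) the catch-all IB L2 rule that steers traffic to a QP.
 */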
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}

void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}

int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}

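/*
 * mlx4 core "remove" callback: undo mlx4_ib_add() - unregister the device,
 * tear down SR-IOV, MADs, notifiers, steering QPs, counters and EQs, and
 * release the UAR and PD.
 */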
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	mlx4_ib_diag_cleanup(ibdev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

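/*
 * On the master, schedule per-port work items that set up (or tear down)
 * the para-virtual tunnel QPs for one slave, unless SR-IOV is already
 * going down.
 */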
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		return;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
	return;
}

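/*
 * On a catastrophic device error, walk every QP and arrange for the
 * completion handler of each CQ that still has outstanding work to be
 * called, so consumers can observe the failure and clean up.
 */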
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head    cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Walk the QP list residing on this ibdev, synchronized with QP create/destroy. */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}

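/*
 * For a bonded device the IB core sees a single port; fold the carrier
 * state of both slave netdevs into one PORT_ACTIVE/PORT_ERR event.
 */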
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current sl to vl mapping for port %d.  Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}

static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);

	kfree(ew);
}

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	} else {
		pr_err("failed to allocate memory for sl2vl update work\n");
	}
}

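/*
 * mlx4 core event dispatcher: translate low-level device events (port
 * up/down, catastrophic error, management changes, slave init/shutdown)
 * into IB events or deferred work items.
 */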
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	    (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);