/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

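/* PF context behaviour bits this driver understands; a slave fails its
 * probe if the PF advertises any behaviour bit outside this mask
 * (checked in mlx4_slave_cap()).
 */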
#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

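/* Scaled-down resource profile, selected by mlx4_low_memory_profile()
 * (e.g. when running inside a kdump kernel) to keep ICM usage small.
 */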
static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				"1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

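/* mlx4_query_func() returns a negative errno on failure; on success it
 * returns a bitmask of the optional capability fields it filled in
 * (currently only the sys_eqs-related fields).
 */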
enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i]  = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs  = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0);
	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}
	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/* The function checks if there are live VFs and returns the number of them */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

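/* Map a proxy/tunnel special QP number to its paravirtualized qkey.
 * Proxy SQPs sit directly below the tunnel SQP range, and each QP gets
 * a distinct qkey at a fixed offset from MLX4_RESERVED_QKEY_BASE.
 */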
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int			   err;
	u32			   page_size;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_func_cap	   func_cap;
	struct mlx4_init_hca_param hca_param;
	u8			   i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->quotas.qp			= func_cap.qp_quota;
	dev->quotas.srq			= func_cap.srq_quota;
	dev->quotas.cq			= func_cap.cq_quota;
	dev->quotas.mpt			= func_cap.mpt_quota;
	dev->quotas.mtt			= func_cap.mtt_quota;
	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs		= func_cap.max_eq;
	dev->caps.reserved_eqs		= func_cap.reserved_eq;
	dev->caps.num_pds               = MLX4_NUM_PDS;
	dev->caps.num_mgms              = 0;
	dev->caps.num_amgms             = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port <  dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);
	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}
	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;
	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
J
Joe Perches 已提交
1113
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
1114 1115 1116 1117 1118
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
J
Joe Perches 已提交
1119
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
1120 1121 1122 1123 1124 1125 1126 1127 1128
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
1145 1146
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
1147 1148 1149 1150 1151 1152 1153 1154
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
1164
				  dev->caps.reserved_cqs, 0, 0);
1165 1166 1167
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

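/* The BlueFlame registers follow the num_uars UAR pages in BAR 2;
 * map that remainder of the BAR with write-combining.
 */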
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

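/* Read the 64-bit free-running HCA cycle counter.  The high word is
 * sampled before and after the low word; if it changed, the low word
 * wrapped between reads and the sample is retried (up to 10 times).
 */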
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);


static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
		MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

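	/* Hand the 64-bit VHCR DMA address to the master 16 bits at a
	 * time, high bits first, over the comm channel. */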
	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

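/* Pick the smallest log2 MGM entry size whose entry holds qp_per_entry
 * QPs: an entry of 2^i bytes has 2^i/16 16-byte blocks, two of which
 * are header, and each remaining block holds four QPNs - so i = 10
 * gives 4 * (64 - 2) = 248 QPs, matching the log_num_mgm_entry_size
 * module parameter description above.
 */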
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}

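/* Prefer device-managed flow steering when the firmware supports it
 * (and, for multi-function devices, when an MGM entry can hold a QP
 * for every function); otherwise fall back to B0 or, failing that, A0
 * steering.
 */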
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size == -1 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

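/* VXLAN tunnel offload is only available under device-managed steering. */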
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
		 (dev->caps.tunnel_offload_mode ==
		  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}

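/* Query and start the firmware (slaves skip this - the master owns the
 * FW), then override the FW log page size through MOD_STAT_CFG.
 */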
static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}

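/* Bring up the HCA itself: on the PF, query device caps, choose steering
 * and tunnel offload modes, size and map the ICM, and run INIT_HCA; on a
 * slave, defer to the slave init path.  The common tail maps the blue
 * flame area and queries the adapter.
 */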
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			goto err_stop_fw;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Failed to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;
		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;
		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_stop_fw;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disabling timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get it from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}

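/* The counters bitmap hands out indexes into the device's pool of
 * dev->caps.max_counters hardware statistics counters.
 */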
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

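/* On multi-function devices, allocation is proxied to the master through
 * the wrapped ALLOC_RES command so per-function usage can be tracked;
 * native devices allocate straight from the bitmap.
 */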
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
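/* Typical usage (sketch): a consumer allocates a counter index, programs
 * it into its QP context, and frees it on teardown:
 *
 *	u32 idx;
 *
 *	if (!mlx4_counter_alloc(dev, &idx)) {
 *		... attach idx to a QP ...
 *		mlx4_counter_free(dev, idx);
 *	}
 */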

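/* Build all software resource tables (UAR, PD, XRCD, MR, MCG, EQ, CQ,
 * SRQ, QP, counters), switch the command interface to event mode, verify
 * interrupt delivery with a NOP command, and configure the ports.
 */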
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
		goto err_qp_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;
				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;
			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
2078 2079
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

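/* Request one completion vector per port per online CPU plus the legacy
 * block, clamped to the EQs the device provides; if fewer vectors are
 * granted, run with a shared legacy layout, and fall back to INTx when
 * MSI-X cannot be enabled at all.
 */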
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);

		if (nreq < 0) {
			kfree(entries);
			goto no_msi;
		} else if (nreq < MSIX_LEGACY_SZ +
			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool           = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

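/* Per-port init: MAC/VLAN/RoCE GID tables on the PF, plus sysfs
 * attributes exposing the port type and IB MTU (read-only in
 * multi-function mode).
 */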
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}

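/* Allocate per-port steering state: promiscuous QP lists and steering
 * entry lists for each steer type.
 */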
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

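/* A single dword in BAR0 arbitrates PF ownership of the HCA: a nonzero
 * read means another driver instance already owns the device, and
 * writing zero releases it.
 */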
static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV)	==\
				  !!((flags) & MLX4_FLAG_MASTER))

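/* Enable SR-IOV (unless the VFs already exist from a previous load) and
 * return the updated device flags; on failure, drop the master flag and
 * free the per-VF state so the driver continues without SR-IOV.
 */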
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;

	dev->dev_vfs = kzalloc(
			total_vfs * sizeof(*dev->dev_vfs),
			GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	} else if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
		int err = 0;

		atomic_inc(&pf_loading);
		if (existing_vfs) {
			if (existing_vfs != total_vfs)
				mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
					 existing_vfs, total_vfs);
		} else {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
			err = pci_enable_sriov(pdev, total_vfs);
		}
		if (err) {
			mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
				 err);
			atomic_dec(&pf_loading);
			goto disable_sriov;
		} else {
			mlx4_warn(dev, "Running in master mode\n");
			dev_flags |= MLX4_FLAG_SRIOV |
				MLX4_FLAG_MASTER;
			dev_flags &= ~MLX4_FLAG_SLAVE;
			dev->num_vfs = total_vfs;
		}
	}
	return dev_flags;

disable_sriov:
	dev->num_vfs = 0;
	kfree(dev->dev_vfs);
	return dev_flags & ~MLX4_FLAG_MASTER;
}

enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};

static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}

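/* Core bring-up path shared by probe and restart: claim ownership and
 * reset the HCA (PF only), start the command interface, enable SR-IOV
 * where requested, initialize the HCA and all resource tables, then
 * register the device and its ports.
 */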
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
2405 2406
{
	struct mlx4_dev *dev;
	unsigned sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				return -EINVAL;
			}
		}
		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

2451 2452 2453 2454 2455 2456 2457
		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			existing_vfs = pci_num_vf(pdev);
			dev->flags = MLX4_FLAG_MASTER;
			dev->num_vfs = total_vfs;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;

		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);

			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
								  existing_vfs);

				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev))
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* Check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user if the PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);
	/* In master functions, validate the VF layout requested by the
	 * module parameters and record the per-VF port mapping */
	if (mlx4_is_master(dev)) {
		int ib_ports = 0;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_ports++;

		if (ib_ports &&
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
			err = -EINVAL;
			goto err_close;
		}
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));

		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}

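/* PCI-level probe work: enable the device, sanity-check the num_vfs and
 * probe_vf module parameters, claim the BARs, program the DMA masks, and
 * finally hand off to mlx4_load_one().
 */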
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to the requirement that all VFs and the PF are *guaranteed* 2
	 * MACs per port, we must limit the number of VFs to 63 (since there
	 * are 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VFs (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VFs (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip VFs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		goto err_release_regions;
	return 0;
err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev       = &priv->dev;
	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	ret =  __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		kfree(priv);

	return ret;
}

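/* Tear down everything mlx4_load_one() set up, in reverse order.  The
 * priv structure survives (marked as removed) so the device can be
 * brought back, e.g. by mlx4_restart_one().
 */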
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               pci_dev_data;
	int p;
	int active_vfs = 0;

	if (priv->removed)
		return;
	pci_dev_data = priv->pci_dev_data;
	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev)) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);
	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
2986

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);
	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_unload_one(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

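/* Full unload/reload cycle on one device, preserving the VF layout
 * across the reload.
 */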
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->num_vfs;
	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	return err;
}

static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_unload_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev	 *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_unload_one,
	.remove		= mlx4_remove_one,
	.err_handler    = &mlx4_err_handler,
};

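/* Validate module parameters before registering the PCI driver; invalid
 * values abort module load.
 */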
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);
	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size != -1 &&
	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);