/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248.range: 9<="
					 " log_num_mgm_entry_size <= 12");

#define MLX4_VF                                        (1 << 0)

#define HCA_GLOBAL_CAP_MASK            0
#define PF_CONTEXT_BEHAVIOUR_MASK      0

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				"1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
{
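	/* EQs the master must provide: the reserved EQs plus
	 * MLX4_MFUNC_EQ_NUM per function (the PF itself and every slave). */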
	return dev->caps.reserved_eqs +
		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
}

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				      "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;

	/* Sense port always allowed on supported devices for ConnectX1 and 2 */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported,
			 * assign IB only if SRIOV is off */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB) {
				if (dev->flags & MLX4_FLAG_SRIOV)
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_NONE;
				else
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_IB;
			/* if IB and ETH are supported,
			 * first of all check if SRIOV is on */
			} else if (dev->flags & MLX4_FLAG_SRIOV)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else {
				/* In non-SRIOV mode, we set the port type
				 * according to user selection of port type,
				 * if user selected none, take the FW hint */
				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i-1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
/* The function checks if there are live VFs and returns how many there are. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int			   err;
	u32			   page_size;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_func_cap	   func_cap;
	struct mlx4_init_hca_param hca_param;
	int			   i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* fail if the HCA has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

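	/* page_size_cap holds ~(min_page_sz - 1), so adding one to its
	 * complement recovers the HCA's minimum page size. */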
	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->caps.num_qps		= func_cap.qp_quota;
	dev->caps.num_srqs		= func_cap.srq_quota;
	dev->caps.num_cqs		= func_cap.cq_quota;
	dev->caps.num_eqs               = func_cap.max_eq;
	dev->caps.reserved_eqs          = func_cap.reserved_eq;
	dev->caps.num_mpts		= func_cap.mpt_quota;
	dev->caps.num_mtts		= func_cap.mtt_quota;
	dev->caps.num_pds               = MLX4_NUM_PDS;
	dev->caps.num_mgms              = 0;
	dev->caps.num_amgms             = 0;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

#if 0
	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
		  dev->caps.num_uars, dev->caps.reserved_uars,
		  dev->caps.uar_page_size * dev->caps.num_uars,
		  pci_resource_len(dev->pdev, 2));
	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
		  dev->caps.reserved_eqs);
	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
		  dev->caps.num_pds, dev->caps.reserved_pds,
		  dev->caps.slave_pd_shift, dev->caps.pd_base);
#endif
	return 0;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port <  dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port + 1] = port_types[port];
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					      "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}


	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
787
				  num_eqs, num_eqs, 0, 0);
788 789 790 791 792
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	down(&priv->cmd.slave_sem);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	up(&priv->cmd.slave_sem);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

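	/* BlueFlame registers sit in BAR 2 right after the UAR pages. */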
	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of FLR the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the"
					  "middle of FLR. retrying..."
					  "(try num:%d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries  + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
		MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

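	/* Hand the 64-bit VHCR DMA address to the master 16 bits at a time,
	 * highest word first. */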
	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
						    MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			goto unmap_bf;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			goto unmap_bf;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		profile = default_profile;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			goto unmap_bf;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
unmap_bf:
	unmap_bf_area(dev);
	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

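	/* max_counters is a power of two (see mlx4_dev_cap), so nent - 1 is a
	 * valid bitmap mask. */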
	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
	return;
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			err = mlx4_check_ext_port_caps(dev, port);
			if (err)
				mlx4_warn(dev, "failed to get port %d extended "
					  "port capabilities support info (%d)."
					  " Assuming not supported\n",
					  port, err);

			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
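	/* Initial request: one completion vector per online CPU plus one
	 * (capped at MAX_MSIX_P_PORT) for each port, plus MSIX_LEGACY_SZ,
	 * all bounded by MAX_MSIX. */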
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors,
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool           = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn =
			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
		INIT_LIST_HEAD(&priv->steer[i].high_prios);
	}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4
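/* The ownership semaphore lives at MLX4_OWNER_BASE in BAR 0; a nonzero value
 * means another function already owns the HCA. */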

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}
	if (num_vfs > MLX4_MAX_NUM_VF) {
		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
		       num_vfs, MLX4_MAX_NUM_VF);
		return -EINVAL;
	}
	/*
	 * Check for BARs.
	 */
	if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting."
			"(id == 0X%p, id->driver_data: 0x%lx,"
			" pci_resource_flags(pdev, 0):0x%lx)\n", id,
			id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	/* Detect if this device is a virtual function */
	if (id && id->driver_data & MLX4_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function:%d\n",
						extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported."
					  " Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (num_vfs) {
			mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable sriov,"
					 "continuing without sriov enabled"
					 " (err = %d).\n", err);
				num_vfs = 0;
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_rel_own;
		}
	}

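	/* Re-entry point: if mlx4_init_hca() below reports that this function
	 * is not the primary PF (-EACCES), the command interface is cleaned
	 * up and re-initialized here in slave mode. */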
slave_start:
	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			if (mlx4_multi_func_init(dev)) {
				mlx4_err(dev, "Failed to init slave mfunc"
					 " interface, aborting.\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not the primary physical function;
			 * running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (mlx4_multi_func_init(dev)) {
			mlx4_err(dev, "Failed to init master mfunc"
				 "interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

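	/* Initialize the MSI-X vector pool bookkeeping before enabling MSI-X */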
	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		mlx4_err(dev, "INTx is not supported in multi-function mode."
			 " aborting.\n");
1837
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
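	/* If HCA setup failed with -EBUSY while MSI-X was in use on a
	 * single-function device, fall back to legacy (INTx) interrupts
	 * and retry. */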
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
1849 1850 1851 1852 1853
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

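	/* Initialize and start the port-type sensing task (auto-detects
	 * IB vs. Ethernet on ports configured for auto-sensing) */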
	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

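/* Error unwind: each label below undoes one initialization step and falls
 * through to the earlier ones, mirroring the setup order above. */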
err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
		pci_disable_sriov(pdev);

err_rel_own:
	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		/* In SRIOV, the PF driver must not be unloaded
		 * while there are still active VFs */
		if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF while there are assigned VFs!\n");
		}
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

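		/* Tear down in roughly the reverse order of __mlx4_init_one():
		 * ports, HCA resource tables, EQs, HCA, command interface,
		 * then SRIOV and PCI resources. */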
		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
			mlx4_warn(dev, "Disabling sriov\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);
		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);