/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 17,
	.num_mtt	= 1 << 20,
};

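/* Tunables bounding per-port MAC, VLAN, and priority steering resources. */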
static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");

static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

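/*
 * Validate a requested port type configuration: mixing port types
 * requires the DPDP capability, an Ethernet port may not precede an
 * IB port, and each requested type must be supported by the
 * corresponding physical port.
 */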
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				      "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	dev->caps.port_mask = 0;
	for (i = 1; i <= dev->caps.num_ports; ++i)
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
			dev->caps.port_mask |= 1 << (i - 1);
}
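
/*
 * Query device capabilities from firmware, sanity-check them against
 * kernel and PCI resource limits, and fill in the dev->caps fields
 * used by the rest of the driver.
 */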
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
	}

	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
						    dev->caps.mtts_per_seg);
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.loopback_support   = dev_cap->loopback_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = log_num_vlan;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
		else
			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
		dev->caps.possible_type[i] = dev->caps.port_type[i];
		mlx4_priv(dev)->sense.sense_allowed[i] =
			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	mlx4_set_port_mask(dev);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port <  dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			change = 1;
			dev->caps.port_type[port + 1] = port_types[port];
		}
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					      "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

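/*
 * Allocate ICM for the firmware area, hand it to the device with
 * MAP_FA, and start the firmware with RUN_FW.
 */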
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

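/*
 * Map the cMPT ICM regions for QP, SRQ, CQ, and EQ contexts; each
 * region lives at cmpt_base plus a fixed per-type offset.
 */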
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

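/*
 * Size the HCA context area via SET_ICM_SIZE, then map the auxiliary
 * ICM and every context table (cMPT, EQ, MTT, dMPT, QP, AUXC, ALTC,
 * RDMARC, CQ, SRQ, MCG), unwinding in reverse order on failure.
 */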
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}

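/*
 * Bring the HCA up: query and start the firmware, build a resource
 * profile from the defaults, map ICM accordingly, and issue INIT_HCA.
 */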
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		if (err == -EACCES)
			mlx4_info(dev, "non-primary physical function, skipping.\n");
		else
			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	mlx4_cfg.log_pg_sz_m = 1;
	mlx4_cfg.log_pg_sz = 0;
	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
	if (err)
		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}

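/*
 * Set up the driver-side resource tables (UAR, PD, MR, EQ, CQ, SRQ,
 * QP, MCG), verify interrupt delivery with a NOP command, and
 * configure each port.
 */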
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		ib_port_default_caps = 0;
		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
		if (err)
			mlx4_warn(dev, "failed to get port %d default "
				  "ib capabilities (%d). Continuing with "
				  "caps = 0\n", port, err);
		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
		err = mlx4_SET_PORT(dev, port);
		if (err) {
			mlx4_err(dev, "Failed to set port %d, aborting\n",
				port);
			goto err_mcg_table_free;
		}
	}

	return 0;

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

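/*
 * Try to enable MSI-X with one completion vector per CPU plus one for
 * async events; fall back to a single shared legacy interrupt if
 * MSI-X is unavailable.
 */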
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq;
	int err;
	int i;

	if (msi_x) {
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     num_possible_cpus() + 1);
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		dev->caps.num_comp_vectors = nreq - 1;
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	for (i = 0; i < 2; ++i)
1014 1015 1016
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

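/*
 * Per-port init: set up the MAC and VLAN tables and create the sysfs
 * attribute through which the port type can be read and changed.
 */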
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	mlx4_init_mac_table(dev, &info->mac_table);
	mlx4_init_vlan_table(dev, &info->vlan_table);

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
	info->port_attr.show      = show_port_type;
	info->port_attr.store     = set_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}

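/*
 * Core probe path: enable and map the PCI device, reset the HCA (a
 * boot ROM may have left it in an undefined state), initialize the
 * command interface, bring up the HCA, and register the device with
 * upper layers.
 */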
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_close;

	mlx4_enable_msi_x(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_free_eq;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

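/* Tear down and re-probe a device, e.g. after a catastrophic error. */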
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

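/* Reject module parameter values outside their documented ranges. */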
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
		pr_warning("mlx4_core: bad log_num_vlan: %d\n", log_num_vlan);
		return -1;
	}

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	return 0;
}

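/*
 * Module init: validate parameters, set up catastrophic-error
 * handling and the mlx4 workqueue, then register the PCI driver.
 */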
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);