/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
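
/* Worst-case number of WQ descriptors one TSO fragment of up to
 * MAX_TSO bytes can occupy when split into WQ_ENET_MAX_DESC_LEN
 * sized pieces (see ENIC_DESC_MAX_SPLITS below).
 */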
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

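/* Completion queues are laid out RQs first, then WQs: CQ[i] services
 * RQ[i] and CQ[rq_count + i] services WQ[i].  In MSI-X mode the error
 * and notification interrupts take the two vectors after the per-queue
 * ones (rq_count + wq_count and rq_count + wq_count + 1).
 */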
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				tx_coalesce_usecs);
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				rx_coalesce_usecs);
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

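/* INTx shares one hardware interrupt between I/O, error and
 * notification sources; the legacy PBA (pending bits array) register
 * tells us which of the three actually fired, or that the interrupt
 * belongs to another device entirely.
 */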
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),		/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

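/* Dispatch on offload type: TSO when a gso_size is set, L4 checksum
 * offload for CHECKSUM_PARTIAL skbs, otherwise a plain send.  A VLAN
 * tag is inserted when present; with device loopback enabled the
 * loop tag is used instead.
 */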
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

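	/* Stop the queue while a worst-case skb (MAX_SKB_FRAGS fragments,
	 * each split into up to ENIC_DESC_MAX_SPLITS descriptors) might no
	 * longer fit; enic_wq_service() re-wakes the queue once enough
	 * descriptors complete.
	 */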
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

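/* A dynamic (port-profile) vNIC may legitimately carry a zero MAC
 * until its profile supplies one, so zero is accepted there; a regular
 * vNIC requires a valid unicast address.
 */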
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (compare_ether_addr(enic->uc_addr[i],
				uc_addr[j]) == 0)
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (compare_ether_addr(uc_addr[i],
				enic->uc_addr[j]) == 0)
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac)) {
		memcpy(pp->vf_mac, mac, ETH_ALEN);
		return 0;
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	/* Special case handling: mac came from IFLA_VF_MAC */
	if (!is_zero_ether_addr(prev_pp.vf_mac))
		memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

	if (vf == PORT_SELF_VF && is_zero_ether_addr(netdev->dev_addr))
		random_ether_addr(netdev->dev_addr);

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
	if (pp->set & ENIC_SET_NAME)
		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
			pp->name);
	if (pp->set & ENIC_SET_INSTANCE)
		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
			pp->instance_uuid);
	if (pp->set & ENIC_SET_HOST)
		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
			pp->host_uuid);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

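/* Allocate and map one receive buffer sized for an MTU frame plus the
 * VLAN-tagged Ethernet header, then post it to the RQ as a descriptor.
 */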
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

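/* MSI mode has no spare vector for notifications, so device notify
 * events (link, MTU, msglvl changes) are polled from this timer
 * instead (see enic_notify_timer_start()).
 */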
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

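
/* Runs from the notify path when the switch-side MTU of a dynamic vNIC
 * changes: quiesce RQ 0, refill it with new_mtu-sized buffers and
 * restart it, all under rtnl_lock.
 */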
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

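/* Issue a devcmd via its start/finished pair and poll (sleeping, hence
 * process context only) until the firmware reports completion or two
 * seconds elapse.
 */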
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

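/* The default RSS key bytes below spell out ASCII strings ("UCSawesome",
 * "PALOunique", "LINUXrocks", "ENICiscool").  The key is staged in a
 * DMA-coherent buffer because firmware reads it by physical address.
 */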
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}

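/* Enable RSS only when the adapter advertises it and more than one RQ
 * is available.  If programming the hash key or the CPU indirection
 * table fails, fall back to non-RSS operation instead of failing.
 */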
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"failed to set RSS cpu indirection table.\n");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

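/* Reset worker: quiesce the interface, hang-reset the vNIC, reprogram
 * its resources, RSS and ig-vlan rewrite mode, then bring the interface
 * back up.  Runs under rtnl so it cannot race other netdev operations.
 */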
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

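	/* Next try MSI-X with a single RQ (no RSS receive spreading):
	 * we need 1 RQ, m WQs, 1+m CQs, and 1+m+2 INTRs
	 */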
	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

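/* netdev ops for dynamic (port-profile) vNICs.  Identical to
 * enic_netdev_ops below except for the .ndo_set_mac_address handler.
 */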
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};

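/* Undo enic_dev_init(): remove the NAPI contexts, free the vNIC queue
 * resources and drop back to an unknown interrupt mode.
 */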
static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

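/* Discover the vNIC configuration and resource counts, pick an
 * interrupt mode, allocate and initialize the queue resources,
 * program RSS, and register NAPI handlers (one per RQ under MSI-X).
 */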
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
	int num_pps = 1;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			(u16 *)&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}

#endif
	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		dev_err(dev, "port profile alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_disable_sriov;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_free_pp;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

#ifdef CONFIG_PCI_IOV
	if (enic_is_dynamic(enic) && pdev->is_virtfn &&
		is_zero_ether_addr(enic->mac_addr))
		random_ether_addr(enic->mac_addr);
#endif

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_free_pp:
	kfree(enic->pp);
err_out_disable_sriov:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
#endif
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

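	/* Tear down in roughly the reverse order of enic_probe() */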
	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);