/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]     = "INVISIBLE",
	[RESET_TYPE_ALL]           = "ALL",
	[RESET_TYPE_WORLD]         = "WORLD",
	[RESET_TYPE_DISABLE]       = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]   = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]     = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]   = "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]       = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]    = "MC_FAILURE",
};

#define EFX_MAX_MTU (9 * 1024)
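/* i.e. 9216 bytes, comfortably above the 9000-byte jumbo-frame MTU in
 * common use.
 */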

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;
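/* 64 is the customary NAPI poll budget used across kernel drivers. */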

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.  On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
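/* Illustrative arithmetic for the figure above: the queue restarts when
 * 512 of its 1024 descriptors are free, i.e. at worst 512 / 3 = ~170
 * packets outstanding.  Assuming ~1.2 us of wire time per 1500-byte
 * frame at 10Gb/s, those drain in ~170 * 1.2 = ~205 us, so the 150 us
 * default guarantees an interrupt before the link can go idle.
 */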

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
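/* Example (assuming the standard NETIF_MSG_* bit values from
 * <linux/netdevice.h>): the default above is DRV 0x0001 | PROBE 0x0002 |
 * LINK 0x0004 | IFDOWN 0x0010 | IFUP 0x0020 | RX_ERR 0x0040 |
 * TX_ERR 0x0080 | HW 0x2000 = 0x20f7, so "modprobe sfc debug=0x20f7"
 * reproduces it explicitly.
 */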

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		/* Deliver last RX packet. */
		if (channel->rx_pkt) {
			__efx_rx_packet(channel, channel->rx_pkt);
			channel->rx_pkt = NULL;
		}
		if (rx_queue->enabled) {
			efx_rx_strategy(channel);
			efx_fast_push_rx_descriptors(rx_queue);
		}
	}

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test.  It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq.  This may cause an interrupt to be generated
	 * when interrupts are re-enabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
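	/* For example (illustrative): with the default 1024-entry RX and
	 * TX queues this gives roundup_pow_of_two(1024 + 1024 + 128) =
	 * 4096 entries and an eventq_mask of 4095.
	 */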

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

/* Enable event queue processing and NAPI */
static void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
static void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
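/* Example output (illustrative): with shared channels the names are
 * "<name>-0", "<name>-1", ...; with separate_tx_channels they become
 * "<name>-rx-0", ... and "<name>-tx-0", ... respectively.
 */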

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->channel_name[channel->channel],
					sizeof(efx->channel_name[0]));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			efx_nic_generate_fill_event(rx_queue);
		}

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Only perform flush if dma is enabled */
	if (dev->is_busmaster) {
		rc = efx_nic_flush_queues(efx);

		if (rc && EFX_WORKAROUND_7803(efx)) {
			/* Schedule a reset to recover from the flush failure. The
			 * descriptor caches reference memory we're about to free,
			 * but falcon_reconfigure_mac_wrapper() won't reconnect
			 * the MACs because of the pending reset. */
			netif_err(efx, drv, efx->net_dev,
				  "Resetting to recover from flush failure\n");
			efx_schedule_reset(efx, RESET_TYPE_ALL);
		} else if (rc) {
			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
		} else {
			netif_dbg(efx, drv, efx->net_dev,
				  "successfully flushed all queues\n");
		}
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc = 0;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_stop_all(efx);
	efx_stop_interrupts(efx, true);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	efx_start_interrupts(efx, true);
	efx_start_all(efx);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}
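/* The |= / ^= logic above yields the standard IEEE 802.3 pause
 * advertising encoding (illustrative truth table):
 *   wanted_fc = RX|TX  ->  Pause
 *   wanted_fc = RX     ->  Pause | Asym_Pause
 *   wanted_fc = TX     ->  Asym_Pause
 *   wanted_fc = 0      ->  neither bit
 */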

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
	if (rc) {
		/* dma_set_coherent_mask() is not *allowed* to
		 * fail with a mask that dma_set_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_thread_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
	    count > efx_vf_size(efx)) {
		netif_warn(efx, probe, efx->net_dev,
			   "Reducing number of RSS channels from %u to %u for "
			   "VF support. Increase vf-msix-limit to use more "
			   "channels on the PF.\n",
			   count, efx_vf_size(efx));
		count = efx_vf_size(efx);
	}

	return count;
}
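/* Illustrative example: with rss_cpus unset on a 4-core/8-thread system
 * (and SR-IOV not limiting the spread), the loop above counts one CPU
 * per physical core and efx_wanted_parallelism() returns 4.
 */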

static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	unsigned int i;
	int rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_init_eventq(channel);
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);
}

static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	efx_mcdi_mode_poll(efx);

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_fini_eventq(channel);
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	efx->type->dimension_resources(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail3;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail4;

	return 0;

 fail4:
	efx_remove_filters(efx);
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the port,
 * kernel transmit queues and NAPI processing, and ensures that the port is
 * scheduled to be reconfigured. This function is safe to call multiple
 * times when the NIC is in any state.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (!netif_running(efx->net_dev))
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one. Otherwise (we're link
	 * event driven), we have to poll the PHY because after an event queue
	 * flush, we could have a missed a link state change */
	if (efx->type->monitor != NULL) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
	} else {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor and event self-test are stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}
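/* Worked conversion, assuming a hypothetical 5000 ns timer quantum:
 * irq_mod_ticks(60, 5000) = 60000 / 5000 = 12 ticks, while
 * irq_mod_ticks(3, 5000) rounds up to 1 tick rather than down to 0.
 */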

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */

	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation.  Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
1773 1774 1775 1776 1777 1778 1779
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state == STATE_DISABLED)
		return -EIO;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
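	/* If the MC has rebooted since we last polled it, the hardware
	 * state cannot be trusted and a full reset is required first.
	 */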
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	if (efx->state != STATE_DISABLED) {
		/* Stop the device and flush all the channels */
		efx_stop_all(efx);
	}

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);

	efx->type->update_stats(efx);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}


/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	mutex_lock(&efx->mac_lock);
	/* Reconfigure the MAC before enabling the DMA queues so that
	 * the RX buffers don't overflow */
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	return 0;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
	efx_sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
		}
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
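		/* (Worked example, assuming a 256-entry hash, i.e.
		 * EFX_MCAST_HASH_ENTRIES == 256: 0xbe2612ff & 0xff is
		 * 0xff, which is exactly the bit set below.)
		 */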
		set_bit_le(0xff, mc_hash->byte);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

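/* Context: process, rtnl_lock() held. */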
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

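/* Keep the driver's own naming in sync after a netdev rename: the
 * efx_nic name, the MTD partition names and the channel (IRQ) names
 * all embed the interface name.
 */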
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;

fail_registered:
	unregister_netdev(net_dev);
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This must be done before
	 * we try to unregister the netdev, as running their destructors
	 * may be needed to get the device ref count to 0. */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	unregister_netdev(efx->net_dev);
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.  */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);

	efx_stop_interrupts(efx, false);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the RX and TX
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	netif_device_detach(efx->net_dev);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
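	/* (-(1 << (method + 1)) equals ~((1 << (method + 1)) - 1): a
	 * mask that clears bits 0..method and keeps any pending reset
	 * types more thorough than the one just performed.)
	 */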
	efx->reset_pending &= -(1 << (method + 1));

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc || method == RESET_TYPE_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending = ACCESS_ONCE(efx->reset_pending);

	if (!pending)
		return;

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flags set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		netif_info(efx, drv, efx->net_dev,
			   "scheduled reset quenched. NIC not RUNNING\n");
		return;
	}

	rtnl_lock();
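	/* fls() is 1-based, so fls(pending) - 1 selects the
	 * highest-numbered, i.e. most thorough, pending reset type.
	 */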
	(void)efx_reset(efx, fls(pending) - 1);
	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled. So switch back to polled MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations.  Needed so all
 * function pointers are valid and do not have to be tested before use.
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_int,
	.poll		 = efx_port_dummy_op_poll,
	.fini		 = efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_INIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
	}

	efx->type = type;

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
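	/* (e.g. a NIC whose best supported mode is MSI (1), combined
	 * with a user request for MSI-X (0), yields MSI) */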
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_stop_interrupts(efx, false);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx->state = STATE_FINI;
	dev_close(efx->net_dev);

	/* Allow any queued efx_resets() to complete */
	rtnl_unlock();

	efx_stop_interrupts(efx, false);
	efx_sriov_fini(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	/* Wait for any scheduled resets to complete. No more will be
	 * scheduled from this point because efx_stop_all() has been
	 * called, we are no longer registered with driverlink, and
	 * the net_device has been removed. */
	cancel_work_sync(&efx->reset_work);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	pci_set_drvdata(pci_dev, NULL);
	free_netdev(efx->net_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
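/* (VPD is a sequence of tagged resources; the large-resource
 * "Read Only" tag holds keyword fields such as "PN", each encoded
 * as a two-byte keyword, a one-byte length and the value.)
 */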
#define SFC_VPD_LEN 512
static void efx_print_product_vpd(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	j = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);
}


/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	efx_start_interrupts(efx, false);

	return 0;

 fail5:
	efx_fini_port(efx);
 fail4:
	efx->type->fini(efx);
 fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
 fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
				   const struct pci_device_id *entry)
{
	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	net_dev->features |= (type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	efx = netdev_priv(net_dev);
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, type, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	efx_print_product_vpd(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);

	/* Serialise against efx_reset(). No more resets will be
	 * scheduled since efx_stop_all() has been called, and we have
	 * not and never have been registered.
	 */
	cancel_work_sync(&efx->reset_work);

	if (rc)
		goto fail3;

	/* If there was a scheduled reset during probe, the NIC is
	 * probably hosed anyway.
	 */
	if (efx->reset_pending) {
		rc = -EIO;
		goto fail4;
	}

	/* Switch to the running state before we expose the device to the OS,
	 * so that dev_open() / efx_start_all() will actually start the device */
	efx->state = STATE_RUNNING;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	rc = efx_sriov_init(efx);
	if (rc)
		netif_err(efx, probe, efx->net_dev,
			  "SR-IOV can't be enabled rc %d\n", rc);

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	return 0;

 fail4:
	efx_pci_remove_main(efx);
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	pci_set_drvdata(pci_dev, NULL);
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	efx->state = STATE_FINI;

	netif_device_detach(efx->net_dev);

	efx_stop_all(efx);
	efx_stop_interrupts(efx, false);

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	efx->state = STATE_INIT;

	efx_start_interrupts(efx, false);

	mutex_lock(&efx->mac_lock);
	efx->phy_op->reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	netif_device_attach(efx->net_dev);

	efx->state = STATE_RUNNING;

	efx->type->resume_wol(efx);

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

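	/* The device is about to lose power, so any pending resets
	 * are moot */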
	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	efx_pm_thaw(dev);
	return 0;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

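/* freeze/thaw bracket hibernation image creation, while
 * suspend/poweroff and resume/restore handle suspend-to-RAM and
 * restore from hibernation; efx_pm_resume() serves both resume
 * and restore.
 */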
static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	efx_fini_sriov();
 err_sriov:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	efx_fini_sriov();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);