/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include <linux/aer.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
	[RESET_TYPE_ALL]                = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]              = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DISABLE]            = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH]      = "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH]      = "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static bool separate_tx_channels;
module_param(separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 *   512 / 3 * 1.2 = 205 usec.
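 * (The queue restarts at half full: 512 of the assumed 1024 descriptors;
 * at 10G a 1500-byte frame occupies the wire for ~1.2 usec.)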
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
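/* Illustrative usage only: loading the module with "modprobe sfc
 * rss_cpus=4" would spread RX interrupt handling over at most four CPUs.
 */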

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		if (rx_queue->enabled)
			efx_fast_push_rx_descriptors(rx_queue);
	}

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test.  It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
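	/* e.g. with 1024-entry RX and TX rings this rounds
	 * 1024 + 1024 + 128 = 2176 up to 4096 entries. */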
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

/* Enable event queue processing and NAPI */
static void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
static void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->channel_name[channel->channel],
					sizeof(efx->channel_name[0]));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      NET_IP_ALIGN + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = false;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* RX filters also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx_filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely.  We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
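	/* efx_tx_max_skb_descs() is the worst-case descriptor count for one
	 * skb, so the queue stops once fewer entries than that remain free
	 * and (per the comment above) wakes at half way back to empty.
	 */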

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			efx_nic_generate_fill_event(rx_queue);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Only perform flush if DMA is enabled */
	if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
		rc = efx_nic_flush_queues(efx);

		if (rc && EFX_WORKAROUND_7803(efx)) {
			/* Schedule a reset to recover from the flush failure. The
			 * descriptor caches reference memory we're about to free,
			 * but falcon_reconfigure_mac_wrapper() won't reconnect
			 * the MACs because of the pending reset. */
			netif_err(efx, drv, efx->net_dev,
				  "Resetting to recover from flush failure\n");
			efx_schedule_reset(efx, RESET_TYPE_ALL);
		} else if (rc) {
			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
		} else {
			netif_dbg(efx, drv, efx->net_dev,
				  "successfully flushed all queues\n");
		}
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_stop_interrupts(efx, true);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	efx_start_interrupts(efx, true);
	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * stopping of the port's TX queue while the link is down.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Serialise the promiscuous flag with efx_set_rx_mode. */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx->type->reconfigure_mac(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* efx_mac_work() might have been scheduled after efx_stop_port(),
	 * and then cancelled by efx_flush_all() */
	efx->type->reconfigure_mac(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&pci_dev->dev, dma_mask)) {
			rc = dma_set_mask(&pci_dev->dev, dma_mask);
			if (rc == 0)
				break;
		}
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
	if (rc) {
		/* dma_set_coherent_mask() is not *allowed* to
		 * fail with a mask that dma_set_mask() accepted,
		 * but just in case...
		 */
		netif_err(efx, probe, efx->net_dev,
			  "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys,
			  efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys,
		  efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_thread_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
	if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
	    count > efx_vf_size(efx)) {
		netif_warn(efx, probe, efx->net_dev,
			   "Reducing number of RSS channels from %u to %u for "
			   "VF support. Increase vf-msix-limit to use more "
			   "channels on the PF.\n",
			   count, efx_vf_size(efx));
		count = efx_vf_size(efx);
	}

	return count;
}
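
/* Illustrative example: with rss_cpus left at 0 on a hyper-threaded
 * 4-core/8-thread system, the loop above counts each core's thread
 * sibling group once and this function returns 4.
 */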

static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
	unsigned int i;
	int rc;

	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
	if (!efx->net_dev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < efx->n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
			efx->net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif
	return 0;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int max_channels =
		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
		if (rc > 0) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			EFX_BUG_ON_PARANOID(rc >= n_channels);
			n_channels = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     n_channels);
		}

		if (rc == 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (separate_tx_channels) {
				efx->n_tx_channels = max(n_channels / 2, 1U);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = n_channels;
				efx->n_rx_channels = n_channels;
			}
			rc = efx_init_rx_cpu_rmap(efx, xentries);
			if (rc) {
				pci_disable_msix(efx->pci_dev);
				return rc;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
			   efx->n_rx_channels : efx_vf_size(efx));

	return 0;
}

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_init_eventq(channel);
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);
}

static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq || !may_keep_eventq)
			efx_fini_eventq(channel);
	}
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	size_t i;
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and queues by trying to hook
	 * in MSI-X interrupts. */
	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	efx->type->dimension_resources(efx);

	if (efx->n_channels > 1)
		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);

	efx_set_channels(efx);
	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail3;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail4;

	return 0;

 fail4:
	efx_remove_filters(efx);
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured.  Interrupts must already be enabled.  This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev))
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	/* If link state detection is normally event-driven, we have
	 * to poll now because we could have missed a change
	 */
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	/* Make sure the hardware monitor and event self-test are stopped */
	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->mac_work);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down.  Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled.  Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Stop the kernel transmit interface.  This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}
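
/* Worked example (illustrative): with a timer quantum of 5 usec
 * (quantum_ns = 5000), the default rx_irq_mod_usec of 60 converts to
 * 60000 / 5000 = 12 hardware ticks.
 */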

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */

	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);
1760 1761 1762 1763 1764 1765 1766 1767

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation.  Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
1768
		*tx_usecs = DIV_ROUND_UP(
1769
			efx->channel[efx->tx_channel_offset]->irq_moderation *
1770 1771
			efx->timer_quantum_ns,
			1000);
1772 1773
}
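/* Round-trip example (illustrative, again assuming a 5000 ns quantum):
 * setting 13 us stores 13000 / 5000 == 2 ticks, and reading back gives
 * DIV_ROUND_UP(2 * 5000, 1000) == 10 us.  Apart from sub-quantum
 * requests, which irq_mod_ticks() clamps up to one tick, a read-back
 * therefore never exceeds the value that was set.
 */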

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in progress, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_ioctl(efx, ifr, cmd);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
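	/* Bit-level sketch of the conversion above (assuming the MDIO
	 * core's usual layout, see linux/mdio.h): the old encoding flags
	 * clause-45 addressing with bit 10 (0x0400) where the MDIO core
	 * expects MDIO_PHY_ID_C45 (bit 15), so the XOR clears bit 10 and
	 * sets bit 15 while leaving the PRTAD (bits 9:5) and DEVAD
	 * (bits 4:0) fields untouched.
	 */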

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
1928
	struct efx_nic *efx = netdev_priv(net_dev);
1929

1930 1931
	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());
1932

1933 1934
	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
1935 1936 1937 1938

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	spin_lock_bh(&efx->stats_lock);

	efx->type->update_stats(efx);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}


/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
	efx_sriov_mac_address_changed(efx);

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx->type->reconfigure_mac(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}
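		/* Note this is an imperfect filter: the CRC selects only
		 * one of EFX_MCAST_HASH_ENTRIES bits, so unrelated
		 * addresses can hash to the same bit and extra multicast
		 * packets may be accepted; exact filtering is left to
		 * higher layers.
		 */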

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

B
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested.  If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_registered:
	rtnl_lock();
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining.  This has to happen before
	 * we try to unregister the netdev, because running their
	 * destructors may be needed to get the device refcount to 0. */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);

	rtnl_lock();
	unregister_netdevice(efx->net_dev);
	efx->state = STATE_UNINIT;
	rtnl_unlock();
2235 2236 2237 2238 2239 2240 2241 2242
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

B
/* Tears down the entire software state and most of the hardware state
 * before reset.  */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	efx_stop_interrupts(efx, false);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		if (efx->phy_op->reconfigure(efx))
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
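	/* Illustrative example (enum values assumed for the sketch): if
	 * method == 1 then -(1 << 2) is ~3, so bits 0 and 1 (this scope
	 * and everything it subsumes) are cleared while higher-numbered
	 * pending resets remain set.
	 */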
	efx->reset_pending &= -(1 << (method + 1));

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
static int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev =
		of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));

	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = ACCESS_ONCE(efx->reset_pending);
	method = fls(pending) - 1;
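	/* fls() above selects the highest-numbered (largest-scope) reset
	 * that has been requested; any lower-numbered requests are
	 * subsumed by it and are cleared by efx_reset() afterwards.
	 */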

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now.  Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)efx_reset(efx, method);

	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (ACCESS_ONCE(efx->state) != STATE_READY)
		return;

	/* efx_process_channel() will no longer read events once a
	 * reset is scheduled, so switch back to polled MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_int,
	.poll		 = efx_port_dummy_op_poll,
	.fini		 = efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
	}

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_stop_interrupts(efx, false);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	dev_close(efx->net_dev);
	efx_stop_interrupts(efx, false);
	rtnl_unlock();

	efx_sriov_fini(efx);
	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	pci_set_drvdata(pci_dev, NULL);
	free_netdev(efx->net_dev);

	pci_disable_pcie_error_reporting(pci_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
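/* Sketch of the VPD image parsed below (example bytes only, not dumped
 * from real hardware): a large-resource tag (PCI_VPD_LRDT_RO_DATA) with
 * a 2-byte little-endian length introduces the read-only section, whose
 * keyword fields look like
 *
 *	'P' 'N' 0x08 'S' 'F' 'N' '5' '1' '2' '2' 'F'
 *
 * i.e. a 2-byte keyword, a 1-byte field length and the field data (an
 * assumed part number here).  The bounds checks against vpd_size below
 * guard against a truncated or corrupt image.
 */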
static void efx_print_product_vpd(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	j = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);
}


/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	efx_start_interrupts(efx, false);

	return 0;

 fail5:
	efx_fini_port(efx);
 fail4:
	efx->type->fini(efx);
 fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
 fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct efx_nic_type *) entry->driver_data;
	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (efx->type->offload_features & NETIF_F_V6_CSUM)
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	efx_print_product_vpd(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);
	if (rc)
		goto fail3;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	rc = efx_sriov_init(efx);
	if (rc)
		netif_err(efx, probe, efx->net_dev,
			  "SR-IOV can't be enabled rc %d\n", rc);

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	rc = pci_enable_pcie_error_reporting(pci_dev);
	if (rc && rc != -EINVAL)
		netif_warn(efx, probe, efx->net_dev,
			   "pci_enable_pcie_error_reporting failed (%d)\n", rc);

	return 0;

 fail4:
	efx_pci_remove_main(efx);
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	pci_set_drvdata(pci_dev, NULL);
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);
	}

	rtnl_unlock();

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx_start_interrupts(efx, false);

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	efx_pm_thaw(dev);
	return 0;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      enum pci_channel_state state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	int rc;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
		"pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
		/* Non-fatal error. Continue. */
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callbacks unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static struct pci_error_handlers efx_err_handlers = {
	.error_detected = efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
	.err_handler	= &efx_err_handlers,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	efx_fini_sriov();
 err_sriov:
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
	efx_fini_sriov();
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);