/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */

static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/*
 * e1000_regdump - register printout routine
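 *
 * Registers with one instance per queue (RXDCTL, TXDCTL, TARC) are read
 * for both queues and printed as a single "[0-1]" pair.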
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

/*
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
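 *
 * Each section of the dump is gated by its own netif message level bit:
 * registers by hw, the Tx ring by tx_done, the Rx ring by rx_status, and
 * raw buffer contents by pktdata.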
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name, netdev->state, netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						phys_to_virt(buffer_info->dma),
						adapter->rx_ps_bsize0, true);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |     RSS Hash      |        |               |         |
		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
		 *   | Packet   | IP     |        |               |  Type   |
		 *   | Checksum | Ident  |        |               |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       phys_to_virt
						       (buffer_info->dma),
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
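 *
 * One ring slot is always left unused so that next_to_use == next_to_clean
 * unambiguously means "ring empty".  Example: count = 256,
 * next_to_clean = 10, next_to_use = 250 gives 256 + 10 - 250 - 1 = 15
 * unused descriptors.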
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);
	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      __le16 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)swab16((__force u16)csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
 * @hw: pointer to the HW structure
 * @tail: address of tail descriptor register
 * @i: value to write to tail descriptor register
 *
 * When updating the tail register, the ME could be accessing Host CSR
 * registers at the same time.  Normally, this is handled in h/w by an
 * arbiter but on some parts there is a bug that acknowledges Host accesses
 * later than it should which could result in the descriptor register to
 * have an incorrect value.  Workaround this by checking the FWSM register
 * which has bit 24 set while ME is accessing Host CSR registers, wait
 * if it is set and try again a number of times.
 **/
static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
					unsigned int i)
{
	unsigned int j = 0;

	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
		udelay(50);

	writel(i, tail);

	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
		return E1000_ERR_SWFW_SYNC;

	return 0;
}

static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags to use for buffer allocations
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

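		/* Bump the hardware tail only every E1000_RX_BUFFER_WRITE-th
		 * descriptor; batching the MMIO write is cheaper than writing
		 * the tail register once per buffer, and the mask test relies
		 * on E1000_RX_BUFFER_WRITE being a power of two.
		 */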
		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags to use for buffer allocations
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
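			/* Each 32-byte packet-split descriptor occupies two
			 * 16-byte descriptor slots from the hardware's point
			 * of view, hence the tail index is doubled (i << 1).
			 */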
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags to use for buffer allocations
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rss);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: incremented for each packet cleaned
 * @work_to_do: NAPI budget; maximum number of packets to clean this pass
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * Copybreak: copy small packets into a freshly allocated
		 * skb so the large receive buffer can be recycled; this
		 * improves performance for small packets with large
		 * amounts of reassembly being done in the stack.
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* The hang may be a false positive if the descriptor
		 * write-back is merely delayed in DMA burst mode; flush
		 * pending descriptor writebacks to memory and check again.
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head),
	      readl(tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

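	/* Report completed work to the byte queue limits (BQL) machinery */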
	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
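	/* Only wake the queue once at least TX_WAKE_THRESHOLD descriptors
	 * are free again, so it does not thrash between the stopped and
	 * running states.
	 */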
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: incremented for each packet cleaned
 * @work_to_do: NAPI budget; maximum number of packets to clean this pass
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * Page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; calling kmap_* is only valid
			 * here in softirq (napi) context.
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page,
						    KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
                               u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 * @work_done: incremented for each packet cleaned
 * @work_to_do: NAPI budget; maximum number of packets to clean this pass
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

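/* rx_skb_top holds the partially assembled skb while a jumbo frame spans
 * multiple receive buffers; rxtop is shorthand for it below.
 */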
#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
				                   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain and EOP is set, so this buffer
				 * holds the whole packet; use copybreak to
				 * save the put_page/alloc_page on small ones */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
					                   KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
					              KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
					                   buffer_info->page, 0,
				                           length);
					e1000_consume_page(buffer_info, skb,
					                   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter, staterr,
				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

1664 1665
		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
1666
		 * link down event; disable receives here in the ISR and reset
1667 1668
		 * adapter in watchdog
		 */
1669 1670 1671 1672 1673
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1674
			adapter->flags |= FLAG_RX_RESTART_NOW;
1675 1676 1677 1678 1679 1680
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

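	/* reset the per-poll byte/packet counters that feed adaptive ITR */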
	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
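	/* itr_val is in interrupts/sec; the ITR register counts the
	 * interval in 256 ns units, hence 1000000000 / (itr_val * 256).
	 */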
	if (rx_ring->set_itr) {
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
		rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
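	/* the EITR registers, like ITR, count the interval in 256 ns units,
	 * so an interrupts/sec rate converts to 1000000000 / (rate * 256)
	 */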
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
	else
		writel(1, rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       tx_ring->itr_register);
	else
		writel(1, tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574   0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}

void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
						      sizeof(struct msix_entry),
						      GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      adapter->num_vectors);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

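	/* leave room for the 5-character "-rx-0"/"-tx-0" suffix */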
	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->rx_ring->name,
			 sizeof(adapter->rx_ring->name) - 1,
			 "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->rx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->tx_ring->name,
			 sizeof(adapter->tx_ring->name) - 1,
			 "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;
	adapter->tx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	e1000_configure_msix(adapter);
	return 0;
out:
	return err;
}

/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}

	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}

/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000e_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/
void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}

/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;
2254 2255 2256 2257 2258 2259

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @tx_ring: Tx descriptor ring
 **/
static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(tx_ring, buffer_info);
	}

	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, tx_ring->head);
	writel(0, tx_ring->tail);
}

/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @rx_ring: Rx descriptor ring
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	e1000_clean_rx_ring(rx_ring);

	for (i = 0; i < rx_ring->count; i++)
		kfree(rx_ring->buffer_info[i].ps_pages);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.  This functionality is controlled
 *      by the InterruptThrottleRate module parameter.
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
		new_itr = 0;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
				    adapter->tx_itr,
				    adapter->total_tx_packets,
				    adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->total_rx_packets,
				    adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			     min(adapter->itr + (new_itr >> 2), new_itr) :
			     new_itr;
		adapter->itr = new_itr;
		new_itr = new_itr > adapter->itr ?
			     min(adapter->itr + (new_itr >> 2), new_itr) :
			     new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = new_itr;
		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			if (new_itr)
				ew32(ITR, 1000000000 / (new_itr * 256));
			else
				ew32(ITR, 0);
	}
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size = sizeof(struct e1000_ring);

	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;
	adapter->tx_ring->count = adapter->tx_ring_count;
	adapter->tx_ring->adapter = adapter;

	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;
	adapter->rx_ring->count = adapter->rx_ring_count;
	adapter->rx_ring->adapter = adapter;

	return 0;
err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}

/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 1, work_done = 0;

	adapter = netdev_priv(poll_dev);

	if (adapter->msix_entries &&
	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
		goto clean_rx;

	tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);

clean_rx:
	adapter->clean_rx(adapter->rx_ring, &work_done, budget);

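	/* claiming the full budget keeps NAPI polling until Tx is clean */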
	if (!tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->state)) {
			if (adapter->msix_entries)
				ew32(IMS, adapter->rx_ring->ims_val);
			else
				e1000_irq_enable(adapter);
		}
	}

	return work_done;
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	/* add VID to filter table */
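	/* (VFTA is 4096 filter bits held in 128 32-bit registers: VID bits
	 * 11:5 select the register, bits 4:0 the bit within it)
	 */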
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta |= (1 << (vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000e_release_hw_control(adapter);
		return 0;
	}

	/* remove VID from filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta &= ~(1 << (vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

/**
 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}
}

/**
 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		ew32(RCTL, rctl);
	}
}

/**
 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl &= ~E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}

/**
 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		e1000_vlan_rx_add_vid(netdev, vid);
		adapter->mng_vlan_id = vid;
	}

	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
		e1000_vlan_rx_kill_vid(netdev, old_vid);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_add_vid(adapter->netdev, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h, mdef, i, j;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/*
	 * enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS
	 */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);

	switch (hw->mac.type) {
	default:
		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
		break;
	case e1000_82574:
	case e1000_82583:
		/*
		 * Check if IPMI pass-through decision filter already exists;
		 * if so, enable it.
		 */
		for (i = 0, j = 0; i < 8; i++) {
			mdef = er32(MDEF(i));

			/* Ignore filters with anything other than IPMI ports */
			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
				continue;

			/* Enable this decision filter in MANC2H */
			if (mdef)
				manc2h |= (1 << i);

			j |= mdef;
		}

		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
			break;

		/* Create new decision filter in an empty filter */
		for (i = 0, j = 0; i < 8; i++)
			if (er32(MDEF(i)) == 0) {
				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
					       E1000_MDEF_PORT_664));
				manc2h |= (1 << 1);
				j++;
				break;
			}

		if (!j)
			e_warn("Unable to create IPMI pass-through filter\n");
		break;
	}

	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}

/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 tdlen, tarc;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = tx_ring->dma;
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH, (tdba >> 32));
	ew32(TDLEN, tdlen);
	ew32(TDH, 0);
	ew32(TDT, 0);
	tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	/* Tx irq moderation */
	ew32(TADV, adapter->tx_abs_int_delay);

	if (adapter->flags2 & FLAG2_DMA_BURST) {
		u32 txdctl = er32(TXDCTL(0));
		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
			    E1000_TXDCTL_WTHRESH);
		/*
		 * set up some performance related parameters to encourage the
		 * hardware to use the bus more efficiently in bursts, depends
		 * on the tx_int_delay to be enabled,
		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
		 * hthresh = 1 ==> prefetch when one or more available
		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
		 * BEWARE: this seems to work but should be considered first if
		 * there are Tx hangs or other Tx related bugs
		 */
		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
		ew32(TXDCTL(0), txdctl);
	}
	/* erratum work around: set txdctl the same for both queues */
	ew32(TXDCTL(1), er32(TXDCTL(0)));

	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
		tarc = er32(TARC(0));
		/*
		 * set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later
		 */
#define SPEED_MODE_BIT (1 << 21)
		tarc |= SPEED_MODE_BIT;
		ew32(TARC(0), tarc);
	}

	/* errata: program both queues to unweighted RR */
	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
		tarc = er32(TARC(0));
		tarc |= 1;
		ew32(TARC(0), tarc);
		tarc = er32(TARC(1));
		tarc |= 1;
		ew32(TARC(1), tarc);
	}

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_TXD_CMD_RS;

	e1000e_config_collision_dist(hw);
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rfctl;
	u32 pages = 0;

	/* Workaround Si errata on 82579 - configure jumbo frame flow */
	if (hw->mac.type == e1000_pch2lan) {
		s32 ret_val;

		if (adapter->netdev->mtu > ETH_DATA_LEN)
			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
		else
			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);

		if (ret_val)
			e_dbg("failed to enable jumbo frame workaround mode\n");
	}

	/* Program MC offset vector base */
	rctl = er32(RCTL);
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Do not Store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Long Packet receive */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Some systems expect that the CRC is included in SMBUS traffic. The
	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
	 * host memory when this is enabled
	 */
	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
		rctl |= E1000_RCTL_SECRC;

	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
		u16 phy_data;

		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
		phy_data &= 0xfff8;
		phy_data |= (1 << 2);
		e1e_wphy(hw, PHY_REG(770, 26), phy_data);

		e1e_rphy(hw, 22, &phy_data);
		phy_data &= 0x0fff;
		phy_data |= (1 << 14);
		e1e_wphy(hw, 0x10, 0x2823);
		e1e_wphy(hw, 0x11, 0x0003);
		e1e_wphy(hw, 22, phy_data);
	}

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case 2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* Enable Extended Status in all Receive Descriptors */
	rfctl = er32(RFCTL);
	rfctl |= E1000_RFCTL_EXTEN;

	/*
	 * 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 *
	 * allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames
	 *
	 * Using pages when the page size is greater than 16k wastes
	 * a lot of memory, since we allocate 3 pages at all times
	 * per packet.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;

	if (adapter->rx_ps_pages) {
		u32 psrctl = 0;

		/*
		 * disable packet split support for IPv6 extension headers,
		 * because some malformed IPv6 headers can hang the Rx
		 */
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);

		/* Enable Packet split descriptors */
		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
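			/* fall through */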
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
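			/* fall through */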
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		ew32(PSRCTL, psrctl);
	}

	ew32(RFCTL, rfctl);
	ew32(RCTL, rctl);
	/* just started the receive unit, no need to restart */
	adapter->flags &= ~FLAG_RX_RESTART_NOW;
}

/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = rx_ring->count *
		    sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	e1e_flush();
	usleep_range(10000, 20000);

	if (adapter->flags2 & FLAG2_DMA_BURST) {
		/*
		 * set the writeback threshold (only takes effect if the RDTR
		 * is set). set GRAN=1 and write back up to 0x4 worth, and
		 * enable prefetching of 0x20 Rx descriptors
		 * granularity = 01
		 * wthresh = 04,
		 * hthresh = 04,
		 * pthresh = 0x20
		 */
		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);

		/*
		 * override the delay timers for enabling bursting, only if
		 * the value was not set by the user via module options
		 */
		if (adapter->rx_int_delay == DEFAULT_RDTR)
			adapter->rx_int_delay = BURST_RDTR;
		if (adapter->rx_abs_int_delay == DEFAULT_RADV)
			adapter->rx_abs_int_delay = BURST_RADV;
	}

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* irq moderation */
	ew32(RADV, adapter->rx_abs_int_delay);
	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
		ew32(ITR, 1000000000 / (adapter->itr * 256));

	ctrl_ext = er32(CTRL_EXT);
	/* Auto-Mask interrupts upon ICR access */
	ctrl_ext |= E1000_CTRL_EXT_IAME;
	ew32(IAM, 0xffffffff);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH, (rdba >> 32));
	ew32(RDLEN, rdlen);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;

	/* Enable Receive Checksum Offload for TCP and UDP */
	rxcsum = er32(RXCSUM);
	if (adapter->netdev->features & NETIF_F_RXCSUM) {
		rxcsum |= E1000_RXCSUM_TUOFL;

		/*
		 * IPv4 payload checksum for UDP fragments must be
		 * used in conjunction with packet-split.
		 */
		if (adapter->rx_ps_pages)
			rxcsum |= E1000_RXCSUM_IPPCSE;
	} else {
		rxcsum &= ~E1000_RXCSUM_TUOFL;
		/* no need to clear IPPCSE as it defaults to 0 */
	}
	ew32(RXCSUM, rxcsum);

	if (adapter->hw.mac.type == e1000_pch2lan) {
		/*
		 * With jumbo frames, excessive C-state transition
		 * latencies result in dropped transactions.
		 */
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			u32 rxdctl = er32(RXDCTL(0));
			ew32(RXDCTL(0), rxdctl | 0x3);
			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
		} else {
			pm_qos_update_request(&adapter->netdev->pm_qos_req,
					      PM_QOS_DEFAULT_VALUE);
		}
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}

/**
 * e1000e_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 */
static int e1000e_write_mc_addr_list(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* update_mc_addr_list expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int rar_entries = hw->mac.rar_entry_count;
	int count = 0;

	/* save a rar entry for our hardware address */
	rar_entries--;

	/* save a rar entry for the LAA workaround */
	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
		rar_entries--;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		/*
		 * write the addresses in reverse order to avoid write
		 * combining
		 */
		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			e1000e_rar_set(hw, ha->addr, rar_entries--);
			count++;
		}
	}

	/* zero out the remaining RAR entries not used above */
	for (; rar_entries > 0; rar_entries--) {
		ew32(RAH(rar_entries), 0);
		ew32(RAL(rar_entries), 0);
	}
	e1e_flush();

	return count;
}

/**
 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000e_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */
	rctl = er32(RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Do not hardware filter VLANs in promisc mode */
		e1000e_vlan_filter_disable(adapter);
	} else {
		int count;

		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
		} else {
3239 3240 3241 3242 3243 3244 3245 3246
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = e1000e_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= E1000_RCTL_MPE;
		}
		e1000e_vlan_filter_enable(adapter);
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = e1000e_write_uc_addr_list(netdev);
		if (count < 0)
			rctl |= E1000_RCTL_UPE;
	}

	ew32(RCTL, rctl);

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		e1000e_vlan_strip_enable(adapter);
	else
		e1000e_vlan_strip_disable(adapter);
}

static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	int i;
	static const u32 rsskey[10] = {
		0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
		0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
	};

	/* Fill out hash function seed */
	for (i = 0; i < 10; i++)
		ew32(RSSRK(i), rsskey[i]);

	/* Direct all traffic to queue 0 */
	for (i = 0; i < 32; i++)
		ew32(RETA(i), 0);

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.
	 */
	rxcsum = er32(RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	ew32(RXCSUM, rxcsum);

	mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	ew32(MRQC, mrqc);
}

/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;

	e1000e_set_rx_mode(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability_pt(adapter);

	e1000_configure_tx(adapter);

	if (adapter->netdev->features & NETIF_F_RXHASH)
		e1000e_setup_rss_hash(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
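	/* prime all currently unused Rx descriptors with buffers */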
	adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
}

/**
 * e1000e_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000e_reset ***
 **/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
	if (adapter->hw.phy.ops.power_up)
		adapter->hw.phy.ops.power_up(&adapter->hw);

	adapter->hw.mac.ops.setup_link(&adapter->hw);
}

/**
 * e1000_power_down_phy - Power down the PHY
 *
 * Power down the PHY so no link is implied when interface is down.
 * The PHY cannot be powered down if management or WoL is active.
 */
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	/* WoL is enabled */
	if (adapter->wol)
		return;

	if (adapter->hw.phy.ops.power_down)
		adapter->hw.phy.ops.power_down(&adapter->hw);
}

/**
 * e1000e_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime. After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
void e1000e_reset(struct e1000_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_fc_info *fc = &adapter->hw.fc;
	struct e1000_hw *hw = &adapter->hw;
	u32 tx_space, min_tx_space, min_rx_space;
	u32 pba = adapter->pba;
	u16 hwm;

	/* reset Packet Buffer Allocation to default */
	ew32(PBA, pba);

	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/*
		 * To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (adapter->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

3405 3406
		/*
		 * If current Tx allocation is less than the min Tx FIFO size,
3407
		 * and the min Tx FIFO size is less than the current Rx FIFO
3408 3409
		 * allocation, take space away from current Rx allocation
		 */
3410 3411 3412
		if ((tx_space < min_tx_space) &&
		    ((min_tx_space - tx_space) < pba)) {
			pba -= min_tx_space - tx_space;

			/*
			 * if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}

		ew32(PBA, pba);
	}

	/*
	 * flow control settings
	 *
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus one full frame
	 */
	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
		fc->pause_time = 0xFFFF;
	else
		fc->pause_time = E1000_FC_PAUSE_TIME;
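	/*
	 * Note: pause_time is in 512-bit-time units, so the maximum value
	 * 0xFFFF asks the link partner to pause for 65535 * 512 bit times,
	 * roughly 33.5 ms at 1 Gb/s.
	 */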
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	switch (hw->mac.type) {
	case e1000_ich9lan:
	case e1000_ich10lan:
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			pba = 14;
			ew32(PBA, pba);
			fc->high_water = 0x2800;
			fc->low_water = fc->high_water - 8;
			break;
		}
		/* fall-through */
	default:
		hwm = min(((pba << 10) * 9 / 10),
			  ((pba << 10) - adapter->max_frame_size));

		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
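		/*
		 * Example (illustrative): with pba = 20 KB and a 1522-byte
		 * max frame, hwm = min(20480 * 9 / 10, 20480 - 1522) =
		 * min(18432, 18958) = 18432, giving high_water = 0x4800
		 * and low_water = 0x47F8.
		 */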
		break;
	case e1000_pchlan:
		/*
		 * Workaround PCH LOM adapter hangs with certain network
		 * loads.  If hangs persist, try disabling Tx flow control.
		 */
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			fc->high_water = 0x3500;
			fc->low_water  = 0x1500;
		} else {
			fc->high_water = 0x5000;
			fc->low_water  = 0x3000;
		}
		fc->refresh_time = 0x1000;
		break;
	case e1000_pch2lan:
		fc->high_water = 0x05C20;
		fc->low_water = 0x05048;
		fc->pause_time = 0x0650;
		fc->refresh_time = 0x0400;
		if (adapter->netdev->mtu > ETH_DATA_LEN) {
			pba = 14;
			ew32(PBA, pba);
		}
		break;
	}

	/*
	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
	 * fit in receive buffer.
	 */
	if (adapter->itr_setting & 0x3) {
		if ((adapter->max_frame_size * 2) > (pba << 10)) {
			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
				dev_info(&adapter->pdev->dev,
					"Interrupt Throttle Rate turned off\n");
				adapter->flags2 |= FLAG2_DISABLE_AIM;
				ew32(ITR, 0);
			}
		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
			dev_info(&adapter->pdev->dev,
				 "Interrupt Throttle Rate turned on\n");
			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
			adapter->itr = 20000;
			ew32(ITR, 1000000000 / (adapter->itr * 256));
		}
	}
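	/*
	 * Note: the ITR register is programmed in 256ns increments, so
	 * writing 1000000000 / (itr * 256) converts an interrupts/sec
	 * target into the interval the hardware expects; e.g. itr = 20000
	 * ints/sec gives a register value of ~195 (~50 us between
	 * interrupts).
	 */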

	/* Allow time for pending master requests to run */
	mac->ops.reset_hw(hw);

	/*
	 * For parts with AMT enabled, let the firmware know
	 * that the network interface is in control
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000e_get_hw_control(adapter);

	ew32(WUC, 0);

	if (mac->ops.init_hw(hw))
		e_err("Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETH_P_8021Q);

	e1000e_reset_adaptive(hw);

	if (!netif_running(adapter->netdev) &&
	    !test_bit(__E1000_TESTING, &adapter->state)) {
		e1000_power_down_phy(adapter);
		return;
	}

	e1000_get_phy_info(hw);

	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
		u16 phy_data = 0;
		/*
		 * speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed
		 */
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
	}
}

int e1000e_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->state);

	if (adapter->msix_entries)
		e1000_configure_msix(adapter);
	e1000_irq_enable(adapter);

	netif_start_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	if (adapter->msix_entries)
		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
	else
		ew32(ICS, E1000_ICS_LSC);

	return 0;
}

static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & FLAG2_DMA_BURST))
		return;

	/* flush pending descriptor writebacks to memory */
	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);

	/* execute the writes immediately */
	e1e_flush();
}

static void e1000e_update_stats(struct e1000_adapter *adapter);

void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	usleep_range(10000, 20000);

	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	e1000e_flush_descriptors(adapter);
	e1000_clean_tx_ring(adapter->tx_ring);
	e1000_clean_rx_ring(adapter->rx_ring);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		e1000e_reset(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}

void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	e1000e_down(adapter);
	e1000e_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->state);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_bsize0 = 128;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
	adapter->tx_ring_count = E1000_DEFAULT_TXD;
	adapter->rx_ring_count = E1000_DEFAULT_RXD;

	spin_lock_init(&adapter->stats64_lock);

	e1000e_set_interrupt_capability(adapter);

	if (e1000_alloc_queues(adapter))
		return -ENOMEM;

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	set_bit(__E1000_DOWN, &adapter->state);
	return 0;
}

/**
 * e1000_intr_msi_test - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi_test(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	e_dbg("icr is %08X\n", icr);
	if (icr & E1000_ICR_RXSEQ) {
		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
		wmb();
	}

	return IRQ_HANDLED;
}

/**
 * e1000_test_msi_interrupt - Returns 0 for successful test
 * @adapter: board private struct
 *
 * code flow taken from tg3.c
 **/
static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* poll_enable hasn't been called yet, so don't need disable */
	/* clear any pending events */
	er32(ICR);

	/* free the real vector and request a test handler */
	e1000_free_irq(adapter);
	e1000e_reset_interrupt_capability(adapter);

	/*
	 * Assume that the test fails; if it succeeds, the test
	 * MSI irq handler will unset this flag
	 */
	adapter->flags |= FLAG_MSI_TEST_FAILED;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		goto msi_test_failed;

	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
			  netdev->name, netdev);
	if (err) {
		pci_disable_msi(adapter->pdev);
		goto msi_test_failed;
	}

	wmb();

	e1000_irq_enable(adapter);

	/* fire an unusual interrupt on the test handler */
	ew32(ICS, E1000_ICS_RXSEQ);
	e1e_flush();
	msleep(50);

	e1000_irq_disable(adapter);

	rmb();

	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e_info("MSI interrupt test failed, using legacy interrupt.\n");
	} else {
		e_dbg("MSI interrupt test succeeded!\n");
	}

	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);

msi_test_failed:
	e1000e_set_interrupt_capability(adapter);
	return e1000_request_irq(adapter);
}

/**
 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
 * @adapter: board private struct
 *
 * code flow taken from tg3.c, called with e1000 interrupts disabled.
 **/
static int e1000_test_msi(struct e1000_adapter *adapter)
{
	int err;
	u16 pci_cmd;

	if (!(adapter->flags & FLAG_MSI_ENABLED))
		return 0;

	/* disable SERR in case the MSI write causes a master abort */
	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_SERR)
		pci_write_config_word(adapter->pdev, PCI_COMMAND,
				      pci_cmd & ~PCI_COMMAND_SERR);

	err = e1000_test_msi_interrupt(adapter);

	/* re-enable SERR */
	if (pci_cmd & PCI_COMMAND_SERR) {
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd |= PCI_COMMAND_SERR;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
	}

	return err;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->state))
		return -EBUSY;

	pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000e_setup_tx_resources(adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000e_setup_rx_resources(adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/*
	 * If AMT is enabled, let the firmware know that the network
	 * interface is now open and reset the part to a known state.
	 */
	if (adapter->flags & FLAG_HAS_AMT) {
		e1000e_get_hw_control(adapter);
		e1000e_reset(adapter);
	}

	e1000e_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		e1000_update_mng_vlan(adapter);

	/* DMA latency requirement to workaround jumbo issue */
	if (adapter->hw.mac.type == e1000_pch2lan)
		pm_qos_add_request(&adapter->netdev->pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/*
	 * Work around PCIe errata with MSI interrupts causing some chipsets to
	 * ignore e1000e MSI messages, which means we need to test our MSI
	 * interrupt now
	 */
	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
		err = e1000_test_msi(adapter);
		if (err) {
			e_err("Interrupt allocation failed\n");
			goto err_req_irq;
		}
	}

	/* From here on the code is the same as e1000e_up() */
	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	adapter->tx_hang_recheck = false;
	netif_start_queue(netdev);

	adapter->idle_check = true;
	pm_runtime_put(&pdev->dev);

	/* fire a link status change interrupt to start the watchdog */
	if (adapter->msix_entries)
		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
	else
		ew32(ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	e1000e_release_hw_control(adapter);
	e1000_power_down_phy(adapter);
	e1000e_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	e1000e_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	e1000e_reset(adapter);
	pm_runtime_put_sync(&pdev->dev);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

	pm_runtime_get_sync(&pdev->dev);

	napi_disable(&adapter->napi);

	if (!test_bit(__E1000_DOWN, &adapter->state)) {
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000_power_down_phy(adapter);

	e1000e_free_tx_resources(adapter->tx_ring);
	e1000e_free_rx_resources(adapter->rx_ring);

	/*
	 * kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if (adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	/*
	 * If AMT is enabled, let the firmware know that the network
	 * interface is now closed
	 */
	if ((adapter->flags & FLAG_HAS_AMT) &&
	    !test_bit(__E1000_TESTING, &adapter->state))
		e1000e_release_hw_control(adapter);

	if (adapter->hw.mac.type == e1000_pch2lan)
		pm_qos_remove_request(&adapter->netdev->pm_qos_req);

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
		/* activate the work around */
		e1000e_set_laa_state_82571(&adapter->hw, 1);

		/*
		 * Hold a copy of the LAA in RAR[14] This is done so that
		 * between the time RAR[0] gets clobbered  and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped. Eventually the LAA will be in RAR[0] and
		 * RAR[14]
		 */
		e1000e_rar_set(&adapter->hw,
			      adapter->hw.mac.addr,
			      adapter->hw.mac.rar_entry_count - 1);
	}

	return 0;
}

/**
 * e1000e_update_phy_task - work thread to update phy
 * @work: pointer to our work struct
 *
 * this worker thread exists because we must acquire a
 * semaphore to read the phy, which may mean sleeping while
 * waiting for it, and we can't msleep in a timer.
 **/
static void e1000e_update_phy_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, update_phy_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000_get_phy_info(&adapter->hw);
}

/*
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	schedule_work(&adapter->update_phy_task);
}

/**
 * e1000e_update_phy_stats - Update the PHY statistics counters
 * @adapter: board private structure
 *
 * Read/clear the upper 16-bit PHY registers and read/accumulate the lower
 * 16-bit PHY statistics registers.
 **/
static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val;
	u16 phy_data;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;

	/*
	 * A page set is expensive so check if already on desired page.
	 * If not, set to the page with the PHY status registers.
	 */
	hw->phy.addr = 1;
	ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
					   &phy_data);
	if (ret_val)
		goto release;
	if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
	}

	/* Single Collision Count */
	hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
	if (!ret_val)
		adapter->stats.scc += phy_data;

	/* Excessive Collision Count */
	hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
	if (!ret_val)
		adapter->stats.ecol += phy_data;

	/* Multiple Collision Count */
	hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
	if (!ret_val)
		adapter->stats.mcc += phy_data;

	/* Late Collision Count */
	hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
	if (!ret_val)
		adapter->stats.latecol += phy_data;

	/* Collision Count - also used for adaptive IFS */
	hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
	if (!ret_val)
		hw->mac.collision_delta = phy_data;

	/* Defer Count */
	hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
	if (!ret_val)
		adapter->stats.dc += phy_data;

	/* Transmit with no CRS */
	hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
	ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
	if (!ret_val)
		adapter->stats.tncrs += phy_data;

release:
	hw->phy.ops.release(hw);
}

/**
 * e1000e_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
static void e1000e_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorc += er32(GORCL);
	er32(GORCH); /* Clear gorc */
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.mpc += er32(MPC);

	/* Half-duplex statistics */
	if (adapter->link_duplex == HALF_DUPLEX) {
		if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
			e1000e_update_phy_stats(adapter);
		} else {
			adapter->stats.scc += er32(SCC);
			adapter->stats.ecol += er32(ECOL);
			adapter->stats.mcc += er32(MCC);
			adapter->stats.latecol += er32(LATECOL);
			adapter->stats.dc += er32(DC);

			hw->mac.collision_delta = er32(COLC);

			if ((hw->mac.type != e1000_82574) &&
			    (hw->mac.type != e1000_82583))
				adapter->stats.tncrs += er32(TNCRS);
		}
		adapter->stats.colc += hw->mac.collision_delta;
	}

	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotc += er32(GOTCL);
	er32(GOTCH); /* Clear gotc */
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;

	adapter->stats.algnerrc += er32(ALGNERRC);
	adapter->stats.rxerrc += er32(RXERRC);
	adapter->stats.cexterr += er32(CEXTERR);
	adapter->stats.tsctc += er32(TSCTC);
	adapter->stats.tsctfc += er32(TSCTFC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/*
	 * RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += er32(MGTPTC);
	adapter->stats.mgprc += er32(MGTPRC);
	adapter->stats.mgpdc += er32(MGTPDC);
}

/**
 * e1000_phy_read_status - Update the PHY register status snapshot
 * @adapter: board private structure
 **/
static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_phy_regs *phy = &adapter->phy_regs;

	if ((er32(STATUS) & E1000_STATUS_LU) &&
	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
		int ret_val;

		ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
		ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
		ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
		ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
		ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
		ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
		if (ret_val)
			e_warn("Error reading PHY register\n");
	} else {
		/*
		 * Do not read PHY registers if link is not up
		 * Set values to typical power-on defaults
		 */
		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
			     BMSR_ERCAP);
		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
				  ADVERTISE_ALL | ADVERTISE_CSMA);
		phy->lpa = 0;
		phy->expansion = EXPANSION_ENABLENPAGE;
		phy->ctrl1000 = ADVERTISE_1000FULL;
		phy->stat1000 = 0;
		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
	}
}

static void e1000_print_link_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);

	/* Link status message must follow this format for user tools */
	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
		adapter->netdev->name,
		adapter->link_speed,
		adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
		(ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
		(ctrl & E1000_CTRL_RFCE) ? "Rx" :
		(ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}

static bool e1000e_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/*
	 * get_link_status is set on LSC (link status) interrupt or
	 * Rx sequence error interrupt.  get_link_status will stay
	 * false until the check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
		e_info("Gigabit has been disabled, downgrading speed\n");
	}

	return link_active;
}

static void e1000e_enable_receives(struct e1000_adapter *adapter)
{
	/* make sure the receive unit is started */
	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
		struct e1000_hw *hw = &adapter->hw;
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl | E1000_RCTL_EN);
		adapter->flags &= ~FLAG_RX_RESTART_NOW;
	}
}

static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * With 82574 controllers, PHY needs to be checked periodically
	 * for hung state and reset, if two calls return true
	 */
	if (e1000_check_phy_82574(hw))
		adapter->phy_hang_count++;
	else
		adapter->phy_hang_count = 0;

	if (adapter->phy_hang_count > 1) {
		adapter->phy_hang_count = 0;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);

	/* TODO: make this use queue_delayed_work() */
}

static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_phy_info *phy = &adapter->hw.phy;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	link = e1000e_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		e1000e_enable_receives(adapter);
		goto link_up;
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			bool txb2b = true;

			/* Cancel scheduled suspend requests. */
			pm_runtime_resume(netdev->dev.parent);

			/* update snapshot of PHY registers on LSC */
			e1000_phy_read_status(adapter);
			mac->ops.get_link_up_info(&adapter->hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/*
			 * On supported PHYs, check for duplex mismatch only
			 * if link has autonegotiated at 10/100 half
			 */
			if ((hw->phy.type == e1000_phy_igp_3 ||
			     hw->phy.type == e1000_phy_bm) &&
			    hw->mac.autoneg &&
			    (adapter->link_speed == SPEED_10 ||
			     adapter->link_speed == SPEED_100) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				u16 autoneg_exp;

				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);

				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
					e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
			}

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = false;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = false;
				adapter->tx_timeout_factor = 10;
				break;
			}

			/*
			 * workaround: re-program speed mode bit after
			 * link-up event
			 */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC(0));
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC(0), tarc0);
			}

			/*
			 * disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues
			 */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					e_info("10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/*
			 * enable transmits in the hardware, need to do this
			 * after setting TARC(0)
			 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			/*
			 * Perform any post-link-up configuration before
			 * reporting link up.
			 */
			if (phy->ops.cfg_on_link_up)
				phy->ops.cfg_on_link_up(hw);

			netif_carrier_on(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Link status message must follow this format */
			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
			       adapter->netdev->name);
			netif_carrier_off(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
			else
				pm_schedule_suspend(netdev->dev.parent,
							LINK_TIMEOUT);
		}
	}

link_up:
	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);

	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;
	spin_unlock(&adapter->stats64_lock);

	e1000e_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev) &&
	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
		/*
		 * We've lost link, so the controller stops DMA,
		 * but we've got queued Tx work that's never going
		 * to get done, so reset controller to flush Tx.
		 * (Do the reset outside of interrupt context).
		 */
		schedule_work(&adapter->reset_task);
		/* return immediately since reset is imminent */
		return;
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (adapter->itr_setting == 4) {
		/*
		 * Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
		u32 dif = (adapter->gotc > adapter->gorc ?
			    adapter->gotc - adapter->gorc :
			    adapter->gorc - adapter->gotc) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}
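	/*
	 * Worked example (illustrative): fully asymmetric traffic makes
	 * dif == goc, so itr = 6000 + 2000 = 8000; perfectly symmetric
	 * traffic makes dif == 0, so itr = 2000, matching the comment
	 * above.
	 */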

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->msix_entries)
		ew32(ICS, adapter->rx_ring->ims_val);
	else
		ew32(ICS, E1000_ICS_RXDMT0);

	/* flush pending descriptors to memory before detecting Tx hang */
	e1000e_flush_descriptors(adapter);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/*
	 * With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port. Set the appropriate LAA in RAR[0]
	 */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
		e1000e_check_82574_phy_workaround(adapter);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

		if (err)
			return err;
	}

	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	mss = skb_shinfo(skb)->gso_size;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
		                                         0, IPPROTO_TCP, 0);
		cmd_length = E1000_TXD_CMD_IP;
		ipcse = skb_transport_offset(skb) - 1;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
		ipcse = 0;
	}
	ipcss = skb_network_offset(skb);
	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
	tucss = skb_transport_offset(skb);
	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
	tucse = 0;

	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
	               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

	i = tx_ring->next_to_use;
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	context_desc->lower_setup.ip_fields.ipcss  = ipcss;
	context_desc->lower_setup.ip_fields.ipcso  = ipcso;
	context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
	context_desc->upper_setup.tcp_fields.tucss = tucss;
	context_desc->upper_setup.tcp_fields.tucso = tucso;
	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
	context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
	context_desc->cmd_and_length = cpu_to_le32(cmd_length);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}

static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		protocol = skb->protocol;

	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn("checksum_partial proto=%x!\n",
			       be16_to_cpu(protocol));
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
				css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return 1;
}

#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12

static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
			unsigned int first, unsigned int max_per_txd,
			unsigned int nr_frags, unsigned int mss)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		buffer_info->length = size;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		buffer_info->mapped_as_page = false;
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;

		len -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = i;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ? : 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
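	/*
	 * e.g. (illustrative) a TSO skb with 3 segments whose linear area
	 * holds only a 66-byte header and skb->len == 4266 is accounted
	 * as 2 * 66 + 4266 = 4398 bytes, since the header is replicated
	 * into every segment on the wire.
	 */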

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "Tx DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(tx_ring, buffer_info);
	}

	return 0;
}

static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}
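	/*
	 * At this point txd_lower holds the command bits (extended
	 * descriptor, TSE, VLAN insertion) and txd_upper the offload
	 * option bits; both are replicated into every data descriptor
	 * written below.
	 */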

	i = tx_ring->next_to_use;

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (--count > 0);

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;

	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		e1000e_update_tdt_wa(tx_ring, i);
	else
		writel(i, tx_ring->tail);

	/*
	 * we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}

#define MINIMUM_DHCP_PACKET_SIZE 282
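/*
 * 282 bytes is likely 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 240 (minimal
 * BOOTP payload including the magic cookie); anything shorter cannot be a
 * DHCP packet worth passing to the management firmware.
 */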
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw =  &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
		    (adapter->hw.mng_cookie.status &
			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
			return 0;
	}

	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
		return 0;

	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
		return 0;

	{
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
		struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
	}

	return 0;
}

static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	netif_stop_queue(adapter->netdev);
	/*
	 * Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/*
	 * We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (e1000_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(adapter->netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
	if (e1000_desc_unused(tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(tx_ring, size);
}

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
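/*
 * TXD_USE_COUNT conservatively over-estimates the descriptors needed per
 * buffer: e.g. with max_txd_pwr = 12 (4 KB chunks), a 6000-byte mapping
 * yields (6000 >> 12) + 1 = 2 descriptors.
 */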
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/*
	 * The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data
		 */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/*
		 * we do this workaround for ES2LAN, but it is un-necessary,
		 * avoiding it could save a lot of cycles
		 */
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;

			pull_size = min_t(unsigned int, 4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				e_err("__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb_headlen(skb);
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	/*
	 * need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (e1000_maybe_stop_tx(tx_ring, count + 2))
		return NETDEV_TX_BUSY;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(tx_ring, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/*
	 * Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	/* if count is 0 then mapping error has occurred */
	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
	if (count) {
		netdev_sent_queue(netdev, skb->len);
		e1000_tx_queue(tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);

	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	/* don't run the task if already down */
	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
		e1000e_dump(adapter);
		e_err("Reset adapter\n");
	}
	e1000e_reinit_locked(adapter);
}

/**
 * e1000e_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 **/
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
                                             struct rtnl_link_stats64 *stats)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	/* Fill out the OS statistics structure */
	stats->rx_bytes = adapter->stats.gorc;
	stats->rx_packets = adapter->stats.gprc;
	stats->tx_bytes = adapter->stats.gotc;
	stats->tx_packets = adapter->stats.gptc;
	stats->multicast = adapter->stats.mprc;
	stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/*
	 * RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	stats->rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	stats->rx_crc_errors = adapter->stats.crcerrs;
	stats->rx_frame_errors = adapter->stats.algnerrc;
	stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	stats->tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	stats->tx_aborted_errors = adapter->stats.ecol;
	stats->tx_window_errors = adapter->stats.latecol;
	stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	spin_unlock(&adapter->stats64_lock);
	return stats;
5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197
}
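
/*
 * Note (editorial, not from the original sources): the counters filled in
 * above are what user space reads through the standard rtnetlink stats,
 * e.g. "ip -s link show eth0" (interface name assumed for illustration).
 */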

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Jumbo frame support */
	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
			e_err("Jumbo Frames not supported.\n");
			return -EINVAL;
		}

		/*
		 * IP payload checksum (enabled with jumbos/packet-split when
		 * Rx checksum is enabled) and generation of RSS hash is
		 * mutually exclusive in the hardware.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (netdev->features & NETIF_F_RXHASH)) {
			e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
			return -EINVAL;
		}
	}

	/* Supported frame sizes */
	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
	    (max_frame > adapter->max_hw_frame_size)) {
		e_err("Unsupported MTU setting\n");
		return -EINVAL;
	}

	/* Jumbo frame workaround on 82579 requires CRC be stripped */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
	    (new_mtu > ETH_DATA_LEN)) {
		e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
		return -EINVAL;
	}

	/* 82573 Errata 17 */
	if (((adapter->hw.mac.type == e1000_82573) ||
	     (adapter->hw.mac.type == e1000_82574)) &&
	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
	adapter->max_frame_size = max_frame;
	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		e1000e_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more; this pushes us to allocate from the next
	 * larger slab size (e.g. RXBUFFER_2048 --> size-4096 slab).  With the
	 * *_jumbo_rx* routines, however, jumbo receives use fragmented skbs.
	 */

	if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
					 + ETH_FCS_LEN;

	if (netif_running(netdev))
		e1000e_up(adapter);
	else
		e1000e_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->state);

	return 0;
}
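
/*
 * Illustrative usage (assumed, not from the driver itself): this handler
 * runs when user space changes the MTU, e.g. "ip link set dev eth0 mtu 9000"
 * (interface name assumed).  The request must pass the jumbo-frame,
 * supported-frame-size, and CRC-stripping checks above before the device is
 * reset with the new max_frame_size.
 */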

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		e1000_phy_read_status(adapter);

		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
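
/*
 * Illustrative user-space sketch (assumed example, not part of the driver):
 * reading a PHY register through the MII ioctls dispatched above.  The
 * interface name and socket descriptor are assumptions; error handling is
 * elided.
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// link status lands in mii->val_out
 */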

static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg;
	u16 phy_reg, wuc_enable;
	int retval = 0;

	/* copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}

	/* Enable access to the wakeup registers and set page to BM_WUC_PAGE */
	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (retval)
		goto out;

	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
					   (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);

	/* configure and enable PHY wakeup in PHY registers */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return retval;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
			    bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	/* Runtime suspend should only enable wakeup for link changes */
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000e_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000_suspend_workarounds_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
                                    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_pcie_cap(us_dev);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
		                      (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}
}
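
/*
 * PCI_EXP_DEVCTL_CERE above is the Correctable Error Reporting Enable bit
 * in the PCIe Device Control register; it is cleared around the power-off
 * and restored afterwards so the D0->D3 transition cannot log a spurious
 * correctable error on the switch's downstream port.
 */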

#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	pci_disable_link_state_locked(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	int pos;
	u16 reg16;

	/*
	 * Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pos = pci_pcie_cap(pdev);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);

	if (!pdev->bus->self)
		return;

	pos = pci_pcie_cap(pdev->bus->self);
	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
	reg16 &= ~state;
	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");

	__e1000e_disable_aspm(pdev, state);
}
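
/*
 * Note on the non-CONFIG_PCIEASPM fallback above: PCIE_LINK_STATE_L0S and
 * PCIE_LINK_STATE_L1 line up with the ASPM Control field (bits 1:0) of the
 * PCIe Link Control register, which is why clearing "state" directly in
 * LNKCTL disables the requested link states.
 */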

#ifdef CONFIG_PM
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
	return !!adapter->tx_ring->buffer_info;
}

static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	u32 err;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	if (hw->mac.type == e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
				phy_data & E1000_WUS_EX ? "Unicast Packet" :
				phy_data & E1000_WUS_MC ? "Multicast Packet" :
				phy_data & E1000_WUS_BC ? "Broadcast Packet" :
				phy_data & E1000_WUS_MAG ? "Magic Packet" :
				phy_data & E1000_WUS_LNKC ?
				"Link Status Change" : "other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);
		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
				wus & E1000_WUS_EX ? "Unicast Packet" :
				wus & E1000_WUS_MC ? "Multicast Packet" :
				wus & E1000_WUS_BC ? "Broadcast Packet" :
				wus & E1000_WUS_MAG ? "Magic Packet" :
				wus & E1000_WUS_LNKC ? "Link Status Change" :
				"other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake, false);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
}

static int e1000_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter))
		adapter->idle_check = true;

	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter)) {
		bool wake;

		__e1000_shutdown(pdev, &wake, true);
	}

	return 0;
}

static int e1000_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	if (adapter->idle_check) {
		adapter->idle_check = false;
		if (!e1000e_has_link(adapter))
			pm_schedule_suspend(dev, MSEC_PER_SEC);
	}

	return -EBUSY;
}
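
/*
 * Returning -EBUSY from the idle callback keeps the PM core from suspending
 * the device synchronously; instead, when the link is down, a runtime
 * suspend is scheduled one second out via pm_schedule_suspend().
 */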

static int e1000_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!e1000e_pm_ready(adapter))
		return 0;

	adapter->idle_check = !dev->power.runtime_auto;
	return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
}

#ifdef CONFIG_NET_POLL_CONTROLLER

static irqreturn_t e1000_intr_msix(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->msix_entries) {
		int vector, msix_irq;

		vector = 0;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_rx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_tx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_msix_other(msix_irq, netdev);
		enable_irq(msix_irq);
	}

	return IRQ_HANDLED;
}

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		disable_irq(adapter->pdev->irq);
		e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default: /* E1000E_INT_MODE_LEGACY */
		disable_irq(adapter->pdev->irq);
		e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

}

static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	        "Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
			 NETIF_F_RXCSUM | NETIF_F_RXHASH)))
		return 0;

	/*
	 * IP payload checksum (enabled with jumbos/packet-split when Rx
	 * checksum is enabled) and generation of RSS hash is mutually
	 * exclusive in the hardware.
	 */
	if (adapter->rx_ps_pages &&
	    (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
		e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
		return -EINVAL;
	}

	netdev->features = features;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);

	return 0;
}
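
/*
 * For reference (assumed typical usage, not taken from these sources): the
 * feature bits handled above correspond to ethtool toggles such as
 * "ethtool -K eth0 tso on rxhash off", which reach this handler through the
 * ndo_set_features hook.
 */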

static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_rx_mode	= e1000e_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	u16 aspm_disable_flag = 0;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
	                                  pci_select_bars(pdev, IORESOURCE_MEM),
	                                  e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;
	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops		= &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo		= 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_RX |
			    NETIF_F_HW_VLAN_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	/*
	 * The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
				e1000_runtime_resume, e1000_idle)
};
#endif
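
/*
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() populate their
 * callbacks only when CONFIG_PM_SLEEP and CONFIG_PM_RUNTIME are enabled,
 * so this single dev_pm_ops definition covers both configurations.
 */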

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	.driver   = {
		.pm = &e1000_pm_ops,
	},
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);


MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* e1000_main.c */