bnx2.c 214.4 KB
Newer Older
1 2
/* bnx2.c: Broadcom NX2 network driver.
 *
M
Michael Chan 已提交
3
 * Copyright (c) 2004-2011 Broadcom Corporation
4 5 6 7 8 9 10 11
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

12
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
M
Michael Chan 已提交
13 14 15 16

#include <linux/module.h>
#include <linux/moduleparam.h>

17
#include <linux/stringify.h>
M
Michael Chan 已提交
18 19 20 21 22 23 24 25 26 27 28 29 30
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
J
Jiri Slaby 已提交
31
#include <linux/bitops.h>
M
Michael Chan 已提交
32 33 34 35
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
M
Michael Chan 已提交
36
#include <asm/page.h>
M
Michael Chan 已提交
37 38 39
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
40
#include <linux/if.h>
M
Michael Chan 已提交
41 42
#include <linux/if_vlan.h>
#include <net/ip.h>
43
#include <net/tcp.h>
M
Michael Chan 已提交
44 45 46 47
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
48
#include <linux/cache.h>
M
Michael Chan 已提交
49
#include <linux/firmware.h>
B
Benjamin Li 已提交
50
#include <linux/log2.h>
51
#include <linux/aer.h>
M
Michael Chan 已提交
52

53 54 55 56
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
57 58
#include "bnx2.h"
#include "bnx2_fw.h"
D
Denys Vlasenko 已提交
59

60
#define DRV_MODULE_NAME		"bnx2"
61 62
#define DRV_MODULE_VERSION	"2.2.3"
#define DRV_MODULE_RELDATE	"June 27, 2012"
63
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
M
Michael Chan 已提交
64
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
65
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
M
Michael Chan 已提交
66 67
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
68 69 70 71 72 73

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

B
Bill Pemberton 已提交
74
static char version[] =
75 76 77
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 80
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
M
Michael Chan 已提交
81 82 83 84
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
85
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86 87 88 89 90 91 92 93 94 95 96 97

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* Board variants supported by this driver.  The enumerators are used as
 * the driver_data index in bnx2_pci_tbl below and must stay in the same
 * order as the corresponding entries of board_info[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,		/* HP OEM variants of the 5706 */
	NC370I,
	BCM5706S,	/* SerDes (fiber) variant */
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
A
Andrew Morton 已提交
107
/* Human-readable adapter names, indexed by board_t above. */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

M
Michael Chan 已提交
123
/* PCI device ID match table.  The last field (driver_data) is a board_t
 * index; HP OEM boards are matched first via their subsystem IDs so the
 * wildcard PCI_ANY_ID entries for the same device ID do not shadow them.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* symbol; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

149
/* NVRAM device descriptors.  The five leading hex words are raw register
 * values programmed into the NVM controller (strapping/config/write setup —
 * see struct flash_spec in bnx2.h; NOTE(review): exact per-word meaning not
 * visible here, confirm against the header).  The remaining fields are
 * flags, page geometry, byte-address mask, total size and a display name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

238
/* NVRAM descriptor for the 5709/5716 family, which always uses a buffered
 * flash; selected at probe time instead of scanning flash_table.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

247 248
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

B
Benjamin Li 已提交
249
static void bnx2_init_napi(struct bnx2 *bp);
M
Michael Chan 已提交
250
static void bnx2_del_napi(struct bnx2 *bp);
B
Benjamin Li 已提交
251

252
/* Return the number of free TX descriptors in @txr.
 * Called lock-free from the xmit and tx-completion paths, hence the
 * compiler barrier before reading the producer/consumer indices.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		/* Indices are u16 counters; mask off wrap-around. */
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

271 272 273
/* Indirectly read a chip register through the PCI config register window.
 * indirect_lock serializes the address-write/data-read pair against other
 * users of the window.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

/* Indirectly write @val to chip register @offset through the PCI config
 * register window; paired with bnx2_reg_rd_ind() and protected by the
 * same indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

292 293 294 295 296 297 298 299 300
/* Write @val into the firmware shared-memory region at @offset
 * (relative to shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

/* Read a word from the firmware shared-memory region at @offset
 * (relative to shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

304 305 306 307
/* Write @val into the chip's context memory at @cid_addr + @offset.
 * The 5709 uses a request/ack protocol (poll until WRITE_REQ clears,
 * up to 5 x 5us); earlier chips take a direct address/data write.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll for the controller to accept the write. */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389
#ifdef BCM_CNIC
/* Control hook exported to the CNIC driver: perform an indirect register
 * read/write or a context-memory write on its behalf.
 * Returns 0 on success, -EINVAL for an unknown command.
 */
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	if (info->cmd == DRV_CTL_IO_WR_CMD)
		bnx2_reg_wr_ind(bp, io->offset, io->data);
	else if (info->cmd == DRV_CTL_IO_RD_CMD)
		io->data = bnx2_reg_rd_ind(bp, io->offset);
	else if (info->cmd == DRV_CTL_CTX_WR_CMD)
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
	else
		return -EINVAL;

	return 0;
}

/* Populate the CNIC IRQ descriptor.  With MSI-X the CNIC gets its own
 * vector/status block after the net driver's vectors; otherwise it shares
 * status block 0 and polls via cnic_tag.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;	/* first vector past the net driver's */
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

/* Register the CNIC driver's ops with this device.
 * Returns -EINVAL for NULL ops, -EBUSY if already registered, -ENODEV if
 * firmware reports no iSCSI connections; 0 on success.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	/* Publish cnic_data before the ops pointer readers dereference. */
	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

/* Unregister the CNIC driver: clear state under cnic_lock, NULL the ops
 * pointer, then synchronize_rcu() so no reader still holds the old ops.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

S
stephen hemminger 已提交
419
/* CNIC probe entry point: export this device's cnic_eth_dev descriptor,
 * or NULL when the firmware advertises no iSCSI connections.
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

/* Tell a registered CNIC driver (if any) to stop.  cnic_lock protects the
 * cnic_ops pointer for the duration of the callback.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

/* Tell a registered CNIC driver (if any) to start.  In non-MSI-X mode the
 * CNIC shares status block 0, so re-seed cnic_tag from last_status_idx
 * before issuing the start command.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

/* CNIC support compiled out: keep no-op stubs so callers need no #ifdefs. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

489 490 491 492 493 494
/* Read PHY register @reg over MDIO into *@val.
 * If hardware auto-polling is active it is suspended around the access
 * and restored afterwards (40us settle time each way).
 * Returns 0 on success, -EBUSY on MDIO timeout (*val set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}

/* Write @val to PHY register @reg over MDIO.
 * Mirrors bnx2_read_phy(): auto-polling is suspended/restored around the
 * access.  Returns 0 on success, -EBUSY on MDIO timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
        	ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}

/* Mask interrupts on every vector, then read back the ack register to
 * flush the posted writes.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

/* Re-enable interrupts on every vector: first ack the last status index
 * with interrupts still masked, then unmask, and finally kick the host
 * coalescing block so a pending event fires immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

/* Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is raised first so a racing handler sees the device as
 * "interrupts off"; bnx2_netif_start() later decrements it.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

644 645 646
/* Disable NAPI polling on every vector's context. */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

/* Enable NAPI polling on every vector's context. */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

662
/* Quiesce the interface: optionally stop the CNIC client, then NAPI and
 * the TX queues, then interrupts.  Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

/* Undo bnx2_netif_stop().  Only the call that drops intr_sem to zero
 * actually restarts the interface, so nested stop/start pairs balance.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

693 694 695 696 697 698 699 700 701 702
/* Free the TX descriptor rings (DMA-coherent) and software buffer rings
 * for all TX queues; safe to call on partially-allocated state since
 * pointers are NULL-checked and cleared.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

713 714 715 716 717 718 719 720 721 722 723 724
/* Free the RX descriptor rings and page rings (DMA-coherent) plus the
 * vmalloc'ed software buffer rings for all RX queues; tolerates a
 * partially-allocated state.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

745 746 747 748 749 750 751 752 753 754 755 756 757 758
/* Allocate the software buffer ring and DMA-coherent descriptor ring for
 * each TX queue.  Returns 0 or -ENOMEM; on failure the caller
 * (bnx2_alloc_mem) frees whatever was allocated.
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

767 768 769 770 771 772 773 774 775 776 777
/* Allocate RX memory for each queue: vmalloc'ed software rings plus
 * DMA-coherent descriptor rings, and page rings when rx_pg_ring_size is
 * configured (jumbo frames).  Returns 0 or -ENOMEM; the caller cleans up
 * on failure.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

815 816 817
/* Release all device memory: TX/RX rings, 5709 context pages, and the
 * combined status+statistics block.  Also used as the error-unwind path
 * of bnx2_alloc_mem(), so every branch is NULL-safe.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		/* stats_blk shares the status block allocation. */
		bp->stats_blk = NULL;
	}
}

/* Allocate all device memory: the combined status+statistics DMA block
 * (per-vector sub-blocks when MSI-X is available), 5709 context pages,
 * and the RX/TX rings.  Returns 0 or -ENOMEM after freeing everything
 * already allocated.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping,
					GFP_KERNEL | __GFP_ZERO);
	if (status_blk == NULL)
		goto alloc_mem_err;

	/* Vector 0 uses the base (MSI-layout) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get aligned MSI-X sub-blocks. */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

917 918 919 920 921
/* Report the current link state to bootcode/management firmware via the
 * shared-memory BNX2_LINK_STATUS word.  Skipped when a remote PHY owns
 * link management.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice for current state. */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

M
Michael Chan 已提交
976 977 978
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
979
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
980
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981
		 "Copper");
M
Michael Chan 已提交
982 983
}

984 985 986 987 988
/* Log link up/down (with speed, duplex and flow-control details built up
 * via pr_cont), toggle the carrier state, and forward the result to
 * firmware through bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

/* Resolve bp->flow_ctrl (FLOW_CTRL_TX/RX bits) from either the forced
 * configuration or the autonegotiated pause advertisements.  Flow control
 * is only meaningful at full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause resolution via autoneg requires both speed and flow-control
	 * autoneg to be enabled; otherwise honor the requested setting.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate the 1000BASE-X pause bits onto the copper encoding so
	 * a single resolution table below handles both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
/* Record link-up state for the 5709 SerDes PHY: read the negotiated speed
 * and duplex from the GP status block.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP status lives in its own register block; select it, read, and
	 * restore the default IEEE block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed/duplex: report the requested values. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

1130
/* Record link-up state for the 5708 SerDes PHY: speed and duplex come
 * directly from the 1000X status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

/* Record link-up state for the 5706 SerDes PHY.  Line speed is always
 * 1000 Mbps on this part; duplex is resolved from BMCR or, when autoneg
 * is enabled, from the common 1000BASE-X advertisements.  Returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Prefer the highest duplex both ends advertised. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

/* Record link-up state for a copper PHY.  With autoneg enabled, the
 * highest common speed/duplex is resolved from the 1000BASE-T registers
 * first, then the 10/100 advertisements; with autoneg disabled, speed
 * and duplex come from BMCR.  Always returns 0 (link_up may be cleared
 * if no common mode was found).
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 link-partner bits sit two positions above the
		 * corresponding CTRL1000 advertisement bits; shift to align.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common gigabit mode; fall back to 10/100. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

1262
/* Program the L2 RX context for the given connection ID, including the
 * TX-pause enable bit derived from the current flow-control state.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289
/* Initialize the RX context for every RX ring.  Ring 0 uses RX_CID; the
 * RSS rings occupy consecutive CIDs starting at RX_RSS_CID.
 */
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	u32 cid = RX_CID;
	int ring;

	for (ring = 0; ring < bp->num_rx_rings; ring++, cid++) {
		if (ring == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

1290
/* Program the EMAC to match the currently resolved link parameters:
 * inter-packet gap, port mode (MII/GMII/2.5G), duplex, and RX/TX pause
 * enables.  Also acknowledges the link-change interrupt and refreshes
 * the RX contexts so their flow-control bit tracks bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; use extended IPG for 1000HD per errata-style
	 * tuning (0x26ff vs 0x2620).
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it uses
				 * plain MII like 100M.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

1357 1358 1359
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
1360
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1361
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1362 1363 1364 1365 1366 1367 1368
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
1369
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1370
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1371 1372 1373 1374
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

1375 1376 1377 1378 1379 1380
/* Ensure 2.5G operation is enabled in the PHY's UP1 register.
 * Returns 1 if it was already enabled, 0 if this call had to enable it
 * (callers use 0 to force a link renegotiation), and 0 immediately if
 * the PHY is not 2.5G-capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709, the UP1 register lives in the OVER1G block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

/* Ensure 2.5G operation is disabled in the PHY's UP1 register.
 * Returns 1 if this call had to disable it (so the caller knows the link
 * must be renegotiated), 0 if it was already disabled or the PHY is not
 * 2.5G-capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On the 5709, the UP1 register lives in the OVER1G block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

/* Force the SerDes PHY to 2.5G operation.  Chip-specific: the 5709 uses
 * the SERDES_DIG MISC1 register, the 5708 a vendor BMCR bit; other chips
 * are left untouched.  When speed autoneg is configured, autoneg is
 * turned off in BMCR since the speed is now forced.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* If the BMCR read failed, do not write back a bogus value. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

/* Undo bnx2_enable_forced_2g5(): clear the chip-specific forced-2.5G
 * controls and, when speed autoneg is configured, re-enable and restart
 * autonegotiation at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* If the BMCR read failed, do not write back a bogus value. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
/* Toggle a forced link-down condition on the 5706 SerDes PHY via the
 * expansion SERDES_CTL register.  NOTE(review): the 0xff0f / 0xc0 bit
 * meanings are undocumented here; `start` appears to begin forcing the
 * link down and !start to release it (see bnx2_set_link()) — confirm
 * against the Broadcom register reference.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

1526 1527 1528 1529 1530 1531
/* Re-evaluate the PHY link state and bring the driver/MAC state in sync:
 * reads link status (with 5706 SerDes workarounds), dispatches to the
 * chip-specific linkup handler, resolves flow control, reports any link
 * transition, and reprograms the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY configurations are managed via firmware events. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice for the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: BMSR is unreliable; derive link from EMAC status
	 * and the autoneg debug shadow register instead.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode and re-enable
		 * autoneg if we had fallen back to parallel detection.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

1616
        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1617 1618 1619 1620 1621

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

1622
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

1642
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1643 1644 1645 1646 1647 1648 1649
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1650
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651 1652 1653 1654 1655 1656 1657
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1658
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659 1660 1661 1662 1663 1664 1665 1666 1667
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

1668
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

/* Hand the requested link configuration (speed/duplex/pause advertising)
 * to the firmware that manages the remote PHY.  Drops and re-acquires
 * bp->phy_lock around the firmware handshake.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed: encode the single requested mode. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep; must not hold the PHY spinlock. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

/* Apply the requested link configuration to a SerDes PHY.  Handles both
 * forced-speed and autoneg modes, including the chip-specific 2.5G
 * force/advertise plumbing, and forces a visible link-down toward the
 * partner when the configuration changes.  Temporarily drops
 * bp->phy_lock while sleeping.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G enable requires a renegotiation. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-sync flow ctrl and MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

/* Advertised-mode masks for ethtool link configuration.  Note that
 * ETHTOOL_ALL_FIBRE_SPEED expands an expression referencing a local
 * variable `bp` and so may only be used where `bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for all 10/100 and 1000 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

1861 1862 1863 1864 1865 1866
/* Load the default link configuration for a firmware-managed remote PHY
 * from shared memory (per current media type) into the driver's autoneg,
 * advertising, and requested speed/duplex fields.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: later checks override earlier ones, so the
		 * highest configured speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

1908 1909 1910
/* Initialize the default link settings: full autoneg with all supported
 * modes advertised, unless the hardware config in shared memory forces
 * 1G full duplex on a SerDes port, or a remote PHY supplies its own
 * defaults.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

M
Michael Chan 已提交
1934 1935 1936 1937 1938 1939 1940 1941 1942
/* Write the next driver-pulse sequence number to the firmware mailbox so
 * the firmware knows the driver is alive.  Uses the PCI register window
 * directly under indirect_lock rather than the shmem helpers.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

1948 1949 1950 1951 1952 1953 1954
/* Handle a link-status event from the firmware-managed remote PHY:
 * decode speed/duplex/flow-control from the shared-memory status word,
 * switch media type if the firmware changed ports, report any link
 * transition, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on this status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets duplex then falls through to the
		 * matching FULL case for the speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Media type changed: reload that media's defaults. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

2034
	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035 2036 2037 2038 2039 2040
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
M
Michael Chan 已提交
2041
			bnx2_send_heart_beat(bp);
2042 2043 2044 2045 2046
			break;
	}
	return 0;
}

2047 2048
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
2049 2050
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
2051 2052 2053 2054
{
	u32 bmcr;
	u32 new_bmcr;

2055
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2056 2057 2058

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
2059 2060
		u32 new_adv = 0;
		u32 new_adv1000 = 0;
2061

2062
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2063 2064 2065 2066 2067 2068
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

2069 2070 2071
		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);
2072

2073
		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2074

2075 2076
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
2077 2078
			((bmcr & BMCR_ANENABLE) == 0)) {

2079 2080
			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2081
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

2104 2105
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2106

2107 2108
		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
2109
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2110 2111 2112 2113
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

2114 2115
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 2117
		}

2118
		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
2130 2131 2132
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
2133 2134 2135 2136 2137
	}
	return 0;
}

static int
2138
bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 2140
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
2141 2142 2143 2144
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

2145
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146
		return bnx2_setup_serdes_phy(bp, port);
2147 2148
	}
	else {
2149
		return bnx2_setup_copper_phy(bp);
2150 2151 2152
	}
}

2153
static int
2154
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2169 2170
	if (reset_phy)
		bnx2_reset_phy(bp);
2171 2172 2173 2174 2175 2176 2177 2178 2179 2180

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2181
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

2203
static int
2204
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
M
Michael Chan 已提交
2205 2206 2207
{
	u32 val;

2208 2209
	if (reset_phy)
		bnx2_reset_phy(bp);
2210 2211 2212

	bp->mii_up1 = BCM5708S_UP1;

M
Michael Chan 已提交
2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

2225
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
M
Michael Chan 已提交
2226 2227 2228 2229 2230
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

2231 2232 2233
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
M
Michael Chan 已提交
2234 2235 2236 2237 2238 2239 2240 2241 2242
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

2243
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
M
Michael Chan 已提交
2244 2245 2246 2247 2248
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

2249
		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
M
Michael Chan 已提交
2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
2262
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2263
{
2264 2265
	if (reset_phy)
		bnx2_reset_phy(bp);
2266

2267
	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2268

2269
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2270
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
2300
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2301
{
M
Michael Chan 已提交
2302 2303
	u32 val;

2304 2305
	if (reset_phy)
		bnx2_reset_phy(bp);
2306

2307
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2308 2309 2310 2311 2312 2313 2314 2315 2316 2317
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

2318
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2319 2320 2321 2322 2323 2324 2325
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343
	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

M
Michael Chan 已提交
2344 2345 2346 2347
	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2348 2349 2350 2351 2352
	return 0;
}


static int
2353
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2354 2355
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
2356 2357 2358 2359
{
	u32 val;
	int rc = 0;

2360 2361
	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2362

2363 2364
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
2365
	bp->mii_bmsr1 = MII_BMSR;
2366 2367 2368
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

2369
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2370

2371
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2372 2373
		goto setup_phy;

2374 2375 2376 2377 2378
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

2379
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2380
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2381
			rc = bnx2_init_5706s_phy(bp, reset_phy);
2382
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2383
			rc = bnx2_init_5708s_phy(bp, reset_phy);
2384
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2385
			rc = bnx2_init_5709s_phy(bp, reset_phy);
2386 2387
	}
	else {
2388
		rc = bnx2_init_copper_phy(bp, reset_phy);
2389 2390
	}

2391 2392 2393
setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);
2394 2395 2396 2397 2398 2399 2400 2401 2402

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

2403
	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2404 2405
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407 2408 2409 2410
	bp->link_up = 1;
	return 0;
}

M
Michael Chan 已提交
2411 2412 2413 2414 2415 2416 2417 2418 2419
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
2420
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
M
Michael Chan 已提交
2421 2422 2423 2424 2425 2426 2427 2428
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
M
Michael Chan 已提交
2429
		msleep(100);
M
Michael Chan 已提交
2430 2431
	}

2432
	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
M
Michael Chan 已提交
2433 2434
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
M
Michael Chan 已提交
2435
		      BNX2_EMAC_MODE_25G_MODE);
M
Michael Chan 已提交
2436 2437

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2438
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
M
Michael Chan 已提交
2439 2440 2441 2442
	bp->link_up = 1;
	return 0;
}

J
Jeffrey Huang 已提交
2443 2444 2445 2446 2447 2448 2449
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
2450
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
J
Jeffrey Huang 已提交
2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2478
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
J
Jeffrey Huang 已提交
2479 2480 2481 2482 2483 2484 2485
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}

2486
static int
2487
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2488 2489 2490 2491 2492 2493 2494
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

2495
	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2496

2497 2498 2499
	if (!ack)
		return 0;

2500
	/* wait for an acknowledgement. */
M
Michael Chan 已提交
2501
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2502
		msleep(10);
2503

2504
		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2505 2506 2507 2508

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
2509 2510
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;
2511 2512

	/* If we timed out, inform the firmware that this is the case. */
2513
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2514 2515 2516
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

2517
		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
J
Jeffrey Huang 已提交
2518 2519 2520 2521
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}
2522 2523 2524 2525

		return -EBUSY;
	}

2526 2527 2528
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

2529 2530 2531
	return 0;
}

M
Michael Chan 已提交
2532 2533 2534 2535 2536 2537 2538
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2539
	val |= (BNX2_PAGE_BITS - 8) << 16;
2540
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2541
	for (i = 0; i < 10; i++) {
2542
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2543 2544 2545 2546 2547 2548 2549
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

M
Michael Chan 已提交
2550 2551 2552
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

2553
		if (bp->ctx_blk[i])
2554
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2555 2556 2557
		else
			return -ENOMEM;

2558 2559 2560 2561 2562 2563 2564
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
M
Michael Chan 已提交
2565 2566
		for (j = 0; j < 10; j++) {

2567
			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
M
Michael Chan 已提交
2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

2580 2581 2582 2583 2584 2585 2586 2587
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
2588
		int i;
2589 2590 2591

		vcid--;

2592
		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
	    		vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

2609 2610 2611
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);
2612

2613 2614
			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2615

2616 2617
			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
M
Michael Chan 已提交
2618
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2619
		}
2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630
	}
}

static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2631
	if (good_mbuf == NULL)
2632 2633
		return -ENOMEM;

2634
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2635 2636 2637 2638 2639
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
2640
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2642 2643
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);
2644

2645
		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2646 2647 2648 2649 2650 2651 2652 2653 2654

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

2655
		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2656 2657 2658 2659 2660 2661 2662 2663 2664 2665
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

2666
		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2667 2668 2669 2670 2671 2672
	}
	kfree(good_mbuf);
	return 0;
}

static void
2673
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2674 2675 2676 2677 2678
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

2679
	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2680

2681
	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2682 2683
		(mac_addr[4] << 8) | mac_addr[5];

2684
	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2685 2686
}

2687
static inline int
2688
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2689 2690
{
	dma_addr_t mapping;
2691 2692 2693
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2694
	struct page *page = alloc_page(gfp);
2695 2696 2697

	if (!page)
		return -ENOMEM;
2698
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2699
			       PCI_DMA_FROMDEVICE);
2700
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
B
Benjamin Li 已提交
2701 2702 2703 2704
		__free_page(page);
		return -EIO;
	}

2705
	rx_pg->page = page;
2706
	dma_unmap_addr_set(rx_pg, mapping, mapping);
2707 2708 2709 2710 2711 2712
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}

static void
2713
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2714
{
2715
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2716 2717 2718 2719 2720
	struct page *page = rx_pg->page;

	if (!page)
		return;

2721 2722
	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2723 2724 2725 2726 2727

	__free_page(page);
	rx_pg->page = NULL;
}

2728
static inline int
2729
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2730
{
2731
	u8 *data;
2732
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2733
	dma_addr_t mapping;
2734 2735
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2736

2737 2738
	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
2739 2740
		return -ENOMEM;

2741 2742 2743
	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
2744 2745
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2746
		kfree(data);
B
Benjamin Li 已提交
2747 2748
		return -EIO;
	}
2749

2750
	rx_buf->data = data;
2751
	dma_unmap_addr_set(rx_buf, mapping, mapping);
2752 2753 2754 2755

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

2756
	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2757 2758 2759 2760

	return 0;
}

2761
static int
2762
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2763
{
2764
	struct status_block *sblk = bnapi->status_blk.msi;
2765
	u32 new_link_state, old_link_state;
2766
	int is_set = 1;
2767

2768 2769
	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
2770
	if (new_link_state != old_link_state) {
2771
		if (new_link_state)
2772
			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2773
		else
2774
			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2775 2776 2777 2778 2779 2780 2781
	} else
		is_set = 0;

	return is_set;
}

static void
2782
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2783
{
M
Michael Chan 已提交
2784 2785 2786
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2787
		bnx2_set_link(bp);
2788
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2789 2790
		bnx2_set_remote_link(bp);

M
Michael Chan 已提交
2791 2792
	spin_unlock(&bp->phy_lock);

2793 2794
}

2795
static inline u16
2796
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2797 2798 2799
{
	u16 cons;

2800 2801 2802
	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
2803
	barrier();
2804
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2805 2806 2807 2808
		cons++;
	return cons;
}

M
Michael Chan 已提交
2809 2810
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2811
{
2812
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2813
	u16 hw_cons, sw_cons, sw_ring_cons;
B
Benjamin Li 已提交
2814
	int tx_pkt = 0, index;
E
Eric Dumazet 已提交
2815
	unsigned int tx_bytes = 0;
B
Benjamin Li 已提交
2816 2817 2818 2819
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);
2820

2821
	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2822
	sw_cons = txr->tx_cons;
2823 2824

	while (sw_cons != hw_cons) {
2825
		struct bnx2_sw_tx_bd *tx_buf;
2826 2827 2828
		struct sk_buff *skb;
		int i, last;

2829
		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2830

2831
		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2832
		skb = tx_buf->skb;
A
Arjan van de Ven 已提交
2833

E
Eric Dumazet 已提交
2834 2835 2836
		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

2837
		/* partial BD completions possible with TSO packets */
E
Eric Dumazet 已提交
2838
		if (tx_buf->is_gso) {
2839 2840
			u16 last_idx, last_ring_idx;

E
Eric Dumazet 已提交
2841 2842
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2843
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2844 2845 2846 2847 2848 2849
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
A
Arjan van de Ven 已提交
2850

2851
		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2852
			skb_headlen(skb), PCI_DMA_TODEVICE);
2853 2854

		tx_buf->skb = NULL;
E
Eric Dumazet 已提交
2855
		last = tx_buf->nr_frags;
2856 2857

		for (i = 0; i < last; i++) {
2858
			struct bnx2_sw_tx_bd *tx_buf;
2859

2860 2861 2862
			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2863
			dma_unmap_page(&bp->pdev->dev,
2864
				dma_unmap_addr(tx_buf, mapping),
E
Eric Dumazet 已提交
2865
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2866
				PCI_DMA_TODEVICE);
2867 2868
		}

2869
		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2870

E
Eric Dumazet 已提交
2871
		tx_bytes += skb->len;
2872
		dev_kfree_skb(skb);
M
Michael Chan 已提交
2873 2874 2875
		tx_pkt++;
		if (tx_pkt == budget)
			break;
2876

E
Eric Dumazet 已提交
2877 2878
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2879 2880
	}

E
Eric Dumazet 已提交
2881
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2882 2883
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;
B
Benjamin Li 已提交
2884

M
Michael Chan 已提交
2885
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
B
Benjamin Li 已提交
2886
	 * before checking for netif_tx_queue_stopped().  Without the
M
Michael Chan 已提交
2887 2888 2889 2890
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
2891

B
Benjamin Li 已提交
2892
	if (unlikely(netif_tx_queue_stopped(txq)) &&
2893
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
B
Benjamin Li 已提交
2894 2895
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
2896
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
B
Benjamin Li 已提交
2897 2898
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
2899
	}
B
Benjamin Li 已提交
2900

M
Michael Chan 已提交
2901
	return tx_pkt;
2902 2903
}

2904
static void
2905
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2906
			struct sk_buff *skb, int count)
2907
{
2908 2909
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
2910
	int i;
B
Benjamin Li 已提交
2911
	u16 hw_prod, prod;
2912
	u16 cons = rxr->rx_pg_cons;
2913

B
Benjamin Li 已提交
2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925
	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
2926 2927
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
B
Benjamin Li 已提交
2928 2929 2930 2931 2932 2933 2934

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

2935
	for (i = 0; i < count; i++) {
2936
		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2937

2938 2939
		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
2940 2941 2942 2943
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];
2944 2945 2946 2947

		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
2948 2949
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));
2950 2951 2952 2953 2954

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
2955 2956
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2957
	}
2958 2959
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
2960 2961
}

2962
static inline void
2963 2964
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
2965
{
2966 2967
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
2968

2969 2970
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
2971

2972
	dma_sync_single_for_device(&bp->pdev->dev,
2973
		dma_unmap_addr(cons_rx_buf, mapping),
2974
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2975

2976
	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2977

2978
	prod_rx_buf->data = data;
2979

2980 2981
	if (cons == prod)
		return;
2982

2983 2984
	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));
2985

2986 2987
	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2988 2989
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2990 2991
}

2992 2993
/* Build an skb for a received packet: replace the ring buffer, unmap
 * the data buffer and wrap it with build_skb(), then (for split-header
 * packets, hdr_len != 0) attach the payload pages from the page ring as
 * frags, replenishing each page as it is consumed.  On any failure the
 * buffer/pages are recycled and NULL is returned.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}

M
Michael Chan 已提交
3097
static inline u16
3098
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
M
Michael Chan 已提交
3099
{
3100 3101
	u16 cons;

3102 3103 3104
	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
3105
	barrier();
3106
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
M
Michael Chan 已提交
3107 3108 3109 3110
		cons++;
	return cons;
}

3111
static int
3112
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3113
{
3114
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3115 3116
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
3117
	int rx_pkt = 0, pg_ring_used = 0;
3118

3119
	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3120 3121
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;
3122 3123 3124 3125 3126 3127

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
3128
		unsigned int len, hdr_len;
3129
		u32 status;
3130
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3131
		struct sk_buff *skb;
3132
		dma_addr_t dma_addr;
3133
		u8 *data;
3134
		u16 next_ring_idx;
3135

3136 3137
		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3138

3139
		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3140 3141
		data = rx_buf->data;
		rx_buf->data = NULL;
3142

3143 3144
		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);
3145

3146
		dma_addr = dma_unmap_addr(rx_buf, mapping);
3147

3148
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3149 3150
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);
3151

3152 3153
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3154 3155
		prefetch(get_l2_fhdr(next_rx_buf->data));

3156
		len = rx_hdr->l2_fhdr_pkt_len;
3157
		status = rx_hdr->l2_fhdr_status;
3158

3159 3160 3161 3162 3163 3164 3165 3166 3167
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

3168 3169 3170 3171 3172 3173
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

3174
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

3186
		len -= 4;
3187

3188
		if (len <= bp->rx_copy_thresh) {
3189 3190 3191
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3192 3193 3194
						  sw_ring_prod);
				goto next_rx;
			}
3195 3196

			/* aligned copy */
3197 3198 3199 3200 3201
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);
3202

3203
			bnx2_reuse_rx_data(bp, rxr, data,
3204 3205
				sw_ring_cons, sw_ring_prod);

3206 3207 3208 3209 3210 3211
		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
3212
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3213 3214
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3215

3216 3217 3218
		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
A
Alexey Dobriyan 已提交
3219
			(ntohs(skb->protocol) != 0x8100)) {
3220

3221
			dev_kfree_skb(skb);
3222 3223 3224 3225
			goto next_rx;

		}

3226
		skb_checksum_none_assert(skb);
3227
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3228 3229 3230
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

3231 3232
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3233 3234
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
3235 3236 3237 3238
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;
3239

3240
		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3241
		napi_gro_receive(&bnapi->napi, skb);
3242 3243 3244
		rx_pkt++;

next_rx:
3245 3246
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3247 3248 3249

		if ((rx_pkt == budget))
			break;
M
Michael Chan 已提交
3250 3251 3252

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
3253
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
M
Michael Chan 已提交
3254 3255
			rmb();
		}
3256
	}
3257 3258
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;
3259

3260
	if (pg_ring_used)
3261
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3262

3263
	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3264

3265
	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276

	mmiowb();

	return rx_pkt;

}

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
3277
bnx2_msi(int irq, void *dev_instance)
3278
{
3279 3280
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
3281

3282
	prefetch(bnapi->status_blk.msi);
3283
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3284 3285 3286 3287
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
3288 3289
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;
3290

3291
	napi_schedule(&bnapi->napi);
3292

3293
	return IRQ_HANDLED;
3294 3295
}

3296 3297 3298
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
3299 3300
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
3301

3302
	prefetch(bnapi->status_blk.msi);
3303 3304 3305 3306 3307

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

3308
	napi_schedule(&bnapi->napi);
3309 3310 3311 3312

	return IRQ_HANDLED;
}

3313
static irqreturn_t
3314
bnx2_interrupt(int irq, void *dev_instance)
3315
{
3316 3317
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
3318
	struct status_block *sblk = bnapi->status_blk.msi;
3319 3320 3321 3322 3323 3324 3325

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
3326
	if ((sblk->status_idx == bnapi->last_status_idx) &&
3327
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3328
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3329
		return IRQ_NONE;
3330

3331
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3332 3333 3334
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

3335 3336 3337
	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
3338
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3339

3340
	/* Return here if interrupt is shared and is disabled. */
3341 3342
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;
3343

3344
	if (napi_schedule_prep(&bnapi->napi)) {
3345
		bnapi->last_status_idx = sblk->status_idx;
3346
		__napi_schedule(&bnapi->napi);
3347
	}
3348

3349
	return IRQ_HANDLED;
3350 3351
}

M
Michael Chan 已提交
3352
static inline int
3353
bnx2_has_fast_work(struct bnx2_napi *bnapi)
M
Michael Chan 已提交
3354
{
3355
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3356
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
M
Michael Chan 已提交
3357

3358
	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3359
	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
M
Michael Chan 已提交
3360
		return 1;
3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373
	return 0;
}

#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;
M
Michael Chan 已提交
3374

3375 3376 3377 3378 3379
#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

3380 3381
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
M
Michael Chan 已提交
3382 3383 3384 3385 3386
		return 1;

	return 0;
}

3387 3388 3389 3390 3391 3392 3393
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
3394
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3395 3396 3397 3398
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3399 3400 3401
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3402 3403 3404 3405 3406 3407 3408
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}

3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425
#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif

3426
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3427
{
3428
	struct status_block *sblk = bnapi->status_blk.msi;
3429 3430
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3431

3432 3433
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3434

3435
		bnx2_phy_int(bp, bnapi);
M
Michael Chan 已提交
3436 3437 3438 3439

		/* This is needed to take care of transient status
		 * during link changes.
		 */
3440 3441 3442
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
3443
	}
3444 3445 3446 3447 3448 3449 3450
}

static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3451

3452
	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
M
Michael Chan 已提交
3453
		bnx2_tx_int(bp, bnapi, 0);
3454

3455
	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3456
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3457

3458 3459 3460
	return work_done;
}

3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

3478
			napi_complete(napi);
3479 3480 3481
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
3482 3483 3484 3485 3486 3487
			break;
		}
	}
	return work_done;
}

3488 3489
static int bnx2_poll(struct napi_struct *napi, int budget)
{
3490 3491
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
3492
	int work_done = 0;
3493
	struct status_block *sblk = bnapi->status_blk.msi;
3494 3495

	while (1) {
3496 3497
		bnx2_poll_link(bp, bnapi);

3498
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
M
Michael Chan 已提交
3499

3500 3501 3502 3503
#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

3504
		/* bnapi->last_status_idx is used below to tell the hw how
M
Michael Chan 已提交
3505 3506 3507
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
3508
		bnapi->last_status_idx = sblk->status_idx;
3509 3510 3511 3512

		if (unlikely(work_done >= budget))
			break;

M
Michael Chan 已提交
3513
		rmb();
3514
		if (likely(!bnx2_has_work(bnapi))) {
3515
			napi_complete(napi);
3516
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3517 3518 3519
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
M
Michael Chan 已提交
3520
				break;
3521
			}
3522 3523 3524 3525 3526 3527 3528 3529
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
3530 3531
			break;
		}
3532 3533
	}

3534
	return work_done;
3535 3536
}

H
Herbert Xu 已提交
3537
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3538 3539 3540 3541 3542
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
M
Michael Chan 已提交
3543
	struct bnx2 *bp = netdev_priv(dev);
3544
	u32 rx_mode, sort_mode;
J
Jiri Pirko 已提交
3545
	struct netdev_hw_addr *ha;
3546 3547
	int i;

3548 3549 3550
	if (!netif_running(dev))
		return;

3551
	spin_lock_bh(&bp->phy_lock);
3552 3553 3554 3555

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3556 3557
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3558 3559 3560 3561
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
M
Michael Chan 已提交
3562 3563
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3564 3565 3566
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3567 3568
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580
        	}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

3581 3582
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
3583 3584 3585 3586 3587 3588 3589
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3590 3591
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
3592 3593 3594 3595 3596
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

3597
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3598 3599 3600 3601 3602
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
J
Jiri Pirko 已提交
3603
		i = 0;
3604
		netdev_for_each_uc_addr(ha, dev) {
J
Jiri Pirko 已提交
3605
			bnx2_set_mac_addr(bp, ha->addr,
3606 3607 3608
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
J
Jiri Pirko 已提交
3609
			i++;
3610 3611 3612 3613
		}

	}

3614 3615
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
3616
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3617 3618
	}

3619 3620 3621
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3622

3623
	spin_unlock_bh(&bp->phy_lock);
3624 3625
}

3626
static int
M
Michael Chan 已提交
3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641
check_fw_section(const struct firmware *fw,
		 const struct bnx2_fw_file_section *section,
		 u32 alignment, bool non_empty)
{
	u32 offset = be32_to_cpu(section->offset);
	u32 len = be32_to_cpu(section->len);

	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
		return -EINVAL;
	if ((non_empty && len == 0) || len > fw->size - offset ||
	    len & (alignment - 1))
		return -EINVAL;
	return 0;
}

3642
static int
M
Michael Chan 已提交
3643 3644 3645 3646 3647 3648 3649 3650 3651 3652
check_mips_fw_entry(const struct firmware *fw,
		    const struct bnx2_mips_fw_file_entry *entry)
{
	if (check_fw_section(fw, &entry->text, 4, true) ||
	    check_fw_section(fw, &entry->data, 4, false) ||
	    check_fw_section(fw, &entry->rodata, 4, false))
		return -EINVAL;
	return 0;
}

3653 3654 3655 3656 3657 3658 3659 3660 3661 3662
static void bnx2_release_firmware(struct bnx2 *bp)
{
	if (bp->rv2p_firmware) {
		release_firmware(bp->mips_firmware);
		release_firmware(bp->rv2p_firmware);
		bp->rv2p_firmware = NULL;
	}
}

static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3663
{
M
Michael Chan 已提交
3664
	const char *mips_fw_file, *rv2p_fw_file;
B
Bastian Blank 已提交
3665 3666
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
M
Michael Chan 已提交
3667 3668
	int rc;

3669
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
M
Michael Chan 已提交
3670
		mips_fw_file = FW_MIPS_FILE_09;
3671 3672
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3673 3674 3675
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
M
Michael Chan 已提交
3676 3677 3678 3679 3680 3681 3682
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
3683
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3684
		goto out;
M
Michael Chan 已提交
3685 3686 3687 3688
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
3689
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3690
		goto err_release_mips_firmware;
M
Michael Chan 已提交
3691
	}
B
Bastian Blank 已提交
3692 3693 3694 3695 3696 3697 3698 3699
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3700
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3701 3702
		rc = -EINVAL;
		goto err_release_firmware;
M
Michael Chan 已提交
3703
	}
B
Bastian Blank 已提交
3704 3705 3706
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3707
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3708 3709
		rc = -EINVAL;
		goto err_release_firmware;
M
Michael Chan 已提交
3710
	}
3711 3712
out:
	return rc;
M
Michael Chan 已提交
3713

3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724
err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}

static int bnx2_request_firmware(struct bnx2 *bp)
{
	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
M
Michael Chan 已提交
3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744
}

static u32
rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
{
	switch (idx) {
	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
		rv2p_code |= RV2P_BD_PAGE_SIZE;
		break;
	}
	return rv2p_code;
}

static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
3745
	int i;
M
Michael Chan 已提交
3746 3747 3748 3749 3750 3751
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3752

M
Michael Chan 已提交
3753 3754 3755 3756 3757 3758
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3759
	}
3760 3761

	for (i = 0; i < rv2p_code_len; i += 8) {
3762
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3763
		rv2p_code++;
3764
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3765 3766
		rv2p_code++;

M
Michael Chan 已提交
3767
		val = (i / 8) | cmd;
3768
		BNX2_WR(bp, addr, val);
M
Michael Chan 已提交
3769 3770 3771 3772 3773 3774 3775 3776 3777
	}

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
3778
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
M
Michael Chan 已提交
3779 3780
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3781
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
M
Michael Chan 已提交
3782 3783

			val = (loc / 2) | cmd;
3784
			BNX2_WR(bp, addr, val);
3785 3786 3787 3788 3789
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
3790
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3791 3792
	}
	else {
3793
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3794
	}
M
Michael Chan 已提交
3795 3796

	return 0;
3797 3798
}

3799
static int
M
Michael Chan 已提交
3800 3801
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
	    const struct bnx2_mips_fw_file_entry *fw_entry)
3802
{
M
Michael Chan 已提交
3803 3804
	u32 addr, len, file_offset;
	__be32 *data;
3805 3806 3807 3808
	u32 offset;
	u32 val;

	/* Halt the CPU. */
3809
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3810
	val |= cpu_reg->mode_value_halt;
3811 3812
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813 3814

	/* Load the Text area. */
M
Michael Chan 已提交
3815 3816 3817 3818
	addr = be32_to_cpu(fw_entry->text.addr);
	len = be32_to_cpu(fw_entry->text.len);
	file_offset = be32_to_cpu(fw_entry->text.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);
M
Michael Chan 已提交
3819

M
Michael Chan 已提交
3820 3821
	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
3822 3823
		int j;

M
Michael Chan 已提交
3824 3825
		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826 3827
	}

M
Michael Chan 已提交
3828 3829 3830 3831 3832
	/* Load the Data area. */
	addr = be32_to_cpu(fw_entry->data.addr);
	len = be32_to_cpu(fw_entry->data.len);
	file_offset = be32_to_cpu(fw_entry->data.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833

M
Michael Chan 已提交
3834 3835
	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
3836 3837
		int j;

M
Michael Chan 已提交
3838 3839
		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3840 3841 3842
	}

	/* Load the Read-Only area. */
M
Michael Chan 已提交
3843 3844 3845 3846 3847 3848 3849
	addr = be32_to_cpu(fw_entry->rodata.addr);
	len = be32_to_cpu(fw_entry->rodata.len);
	file_offset = be32_to_cpu(fw_entry->rodata.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
3850 3851
		int j;

M
Michael Chan 已提交
3852 3853
		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3854 3855 3856
	}

	/* Clear the pre-fetch instruction. */
3857
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
M
Michael Chan 已提交
3858 3859 3860

	val = be32_to_cpu(fw_entry->start_addr);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861 3862

	/* Start the CPU. */
3863
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3864
	val &= ~cpu_reg->mode_value_halt;
3865 3866
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3867 3868

	return 0;
3869 3870
}

3871
static int
3872 3873
bnx2_init_cpus(struct bnx2 *bp)
{
M
Michael Chan 已提交
3874 3875 3876 3877 3878
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;
3879 3880

	/* Initialize the RV2P processor. */
M
Michael Chan 已提交
3881 3882
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3883 3884

	/* Initialize the RX Processor. */
M
Michael Chan 已提交
3885
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3886 3887 3888
	if (rc)
		goto init_cpu_err;

3889
	/* Initialize the TX Processor. */
M
Michael Chan 已提交
3890
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3891 3892 3893
	if (rc)
		goto init_cpu_err;

3894
	/* Initialize the TX Patch-up Processor. */
M
Michael Chan 已提交
3895
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3896 3897 3898
	if (rc)
		goto init_cpu_err;

3899
	/* Initialize the Completion Processor. */
M
Michael Chan 已提交
3900
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3901 3902 3903
	if (rc)
		goto init_cpu_err;

M
Michael Chan 已提交
3904
	/* Initialize the Command Processor. */
M
Michael Chan 已提交
3905
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3906

3907 3908
init_cpu_err:
	return rc;
3909 3910 3911
}

static int
3912
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3913 3914 3915 3916 3917 3918
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
3919
	case PCI_D0: {
3920 3921 3922 3923 3924 3925 3926 3927 3928 3929
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

3930
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3931 3932
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
3933
		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3934

3935
		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3936
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3937
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3938 3939
		break;
	}
3940
	case PCI_D3hot: {
3941 3942 3943 3944 3945 3946 3947 3948 3949 3950
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

M
Michael Chan 已提交
3951 3952 3953 3954 3955 3956 3957 3958
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}
3959

M
Michael Chan 已提交
3960 3961 3962
			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);
3963 3964 3965 3966

			bp->autoneg = autoneg;
			bp->advertising = advertising;

3967
			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3968

3969
			val = BNX2_RD(bp, BNX2_EMAC_MODE);
3970 3971 3972

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
M
Michael Chan 已提交
3973
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3974 3975
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
M
Michael Chan 已提交
3976 3977 3978 3979 3980 3981 3982
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}
3983

3984
			BNX2_WR(bp, BNX2_EMAC_MODE, val);
3985 3986 3987

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3988 3989
				BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
					0xffffffff);
3990
			}
3991 3992
			BNX2_WR(bp, BNX2_EMAC_RX_MODE,
				BNX2_EMAC_RX_MODE_SORT_MODE);
3993 3994 3995

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
3996 3997 3998 3999
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
				BNX2_RPM_SORT_USER0_ENA);
4000 4001

			/* Need to enable EMAC and RPM for WOL. */
4002 4003 4004 4005
			BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4006

4007
			val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4008
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4009
			BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4010 4011 4012 4013 4014 4015 4016

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

4017
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
4018 4019
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);
4020 4021

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4022 4023
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
4056
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4057
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
4078
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4079 4080

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4081
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}


static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

4100 4101
	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4102

M
Michael Chan 已提交
4103
	if (bp->flash_info->flags & BNX2_NV_WREN) {
4104 4105
		int j;

4106 4107 4108
		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		BNX2_WR(bp, BNX2_NVM_COMMAND,
			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4109 4110 4111 4112

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

4113
			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

/* Re-lock the NVRAM against writes by clearing the write-enable bit in
 * MISC_CFG (counterpart of bnx2_enable_nvram_write()).
 */
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}


static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

4139
	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4140
	/* Enable both bits, even on read. */
4141 4142
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4143 4144 4145 4146 4147 4148 4149
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

4150
	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4151
	/* Disable both bits, even after read. */
4152
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4153 4154 4155 4156 4157 4158 4159 4160 4161 4162
		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
			BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

/* Erase one flash page at @offset.  Buffered flash parts
 * (BNX2_NV_BUFFERED) do not need an erase, so this is a no-op for
 * them.  Caller must hold the NVRAM lock and have write access
 * enabled.
 *
 * Returns 0 on success (or no-op), -EBUSY if the erase command timed
 * out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored in
 * big-endian byte order).  @cmd_flags carries FIRST/LAST framing bits
 * for multi-dword transfers.  Caller must hold the NVRAM lock and have
 * access enabled.
 *
 * Returns 0 on success, -EBUSY if the read command timed out.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}


/* Write one 32-bit word (@val, 4 bytes in big-endian order) to NVRAM
 * at @offset.  @cmd_flags carries FIRST/LAST framing bits for
 * multi-dword transfers.  Caller must hold the NVRAM lock and have
 * write access enabled.
 *
 * Returns 0 on success, -EBUSY if the write command timed out.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
M
Michael Chan 已提交
4290
	int j, entry_count, rc = 0;
4291
	const struct flash_spec *flash;
4292

4293
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
M
Michael Chan 已提交
4294 4295 4296 4297
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

4298
	/* Determine the selected interface. */
4299
	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4300

4301
	entry_count = ARRAY_SIZE(flash_table);
4302 4303 4304 4305 4306

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
4307 4308 4309
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4310 4311 4312 4313 4314 4315
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
4316
		u32 mask;
4317 4318
		/* Not yet been reconfigured */

4319 4320 4321 4322 4323
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

4324 4325 4326
		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

4327
			if ((val & mask) == (flash->strapping & mask)) {
4328 4329 4330 4331 4332 4333 4334 4335 4336 4337
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
4338 4339 4340 4341
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
4354
		pr_alert("Unknown flash/EEPROM type\n");
M
Michael Chan 已提交
4355
		return -ENODEV;
4356 4357
	}

M
Michael Chan 已提交
4358
get_flash_size:
4359
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
M
Michael Chan 已提交
4360 4361 4362 4363 4364 4365
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483
	return rc;
}

/* Read an arbitrary byte range from NVRAM into @ret_buf.
 *
 * The flash interface transfers 4-byte aligned dwords, so an unaligned
 * head and/or tail is read through a scratch dword and only the wanted
 * bytes are copied out.  The NVRAM hardware lock is held and flash
 * access enabled for the duration of the transfer.
 *
 * Returns 0 on success or a negative errno (-EBUSY on lock/command
 * timeout).
 *
 * NOTE(review): the early "return rc" paths exit without disabling
 * flash access or releasing the NVRAM lock — confirm callers recover
 * from a failed read before relying on re-acquisition.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy the tail
	 * bytes the caller actually wants. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a whole dword; 'extra' is the
	 * number of trailing bytes that must not be copied out. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword goes through the scratch buffer so any
		 * 'extra' padding bytes are discarded. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}

/* Write an arbitrary byte range (@data_buf, @buf_size bytes) to NVRAM
 * at @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the
 * surrounding dwords are read first and merged into a dword-aligned
 * bounce buffer (align_buf).  Non-buffered flash additionally requires
 * a page-granular read/erase/rewrite cycle through flash_buffer.  The
 * NVRAM lock is acquired and released once per page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, or an
 * error from the underlying dword read/write helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge old head/tail bytes and the new data into one
		 * dword-aligned buffer. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Page buffer for the read/erase/rewrite cycle. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}

4659
static void
4660
bnx2_init_fw_cap(struct bnx2 *bp)
4661
{
4662
	u32 val, sig = 0;
4663

4664
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4665 4666 4667 4668
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4669

4670
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4671 4672 4673
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

4674 4675 4676 4677 4678 4679 4680 4681 4682
	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

4683
		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4684

4685 4686
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4687 4688 4689
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
4690

4691 4692
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4693
	}
4694 4695 4696

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4697 4698
}

4699 4700 4701
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
4702
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4703

4704 4705
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4706 4707
}

4708 4709 4710 4711 4712
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
4713
	u8 old_port;
4714 4715 4716

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
4717 4718
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4719 4720 4721 4722 4723 4724
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
E
Eddie Wai 已提交
4725 4726
		udelay(5);
	} else {  /* 5709 */
4727
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
E
Eddie Wai 已提交
4728
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4729 4730
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
E
Eddie Wai 已提交
4731 4732 4733

		for (i = 0; i < 100; i++) {
			msleep(1);
4734
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
E
Eddie Wai 已提交
4735 4736 4737 4738
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}
4739

4740
	/* Wait for the firmware to tell us it is ok to issue a reset. */
4741
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4742

4743 4744
	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
4745 4746
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4747 4748 4749

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
4750
	val = BNX2_RD(bp, BNX2_MISC_ID);
4751

4752
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4753 4754
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
4755
		udelay(5);
4756

4757 4758
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4759

4760
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4761

4762 4763 4764 4765 4766 4767
	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
4768
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4769

4770 4771 4772 4773
		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
4774 4775
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
A
Arjan van de Ven 已提交
4776
			msleep(20);
4777

4778 4779
		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
4780
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4781 4782 4783 4784 4785 4786 4787 4788
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4789
			pr_err("Chip reset did not complete\n");
4790 4791
			return -EBUSY;
		}
4792 4793 4794
	}

	/* Make sure byte swapping is properly configured. */
4795
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4796
	if (val != 0x01020304) {
4797
		pr_err("Chip not in correct endian mode\n");
4798 4799 4800 4801
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
4802
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4803 4804
	if (rc)
		return rc;
4805

4806
	spin_lock_bh(&bp->phy_lock);
4807
	old_port = bp->phy_port;
4808
	bnx2_init_fw_cap(bp);
4809 4810
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
4811 4812 4813
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

4814
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4815 4816
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
4817
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4818 4819 4820 4821 4822

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

4823
	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4824
		bnx2_setup_msix_tbl(bp);
4825
		/* Prevent MSIX table reads and write from timing out */
4826
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4827 4828
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}
4829

4830 4831 4832 4833 4834 4835
	return rc;
}

static int
bnx2_init_chip(struct bnx2 *bp)
{
4836
	u32 val, mtu;
4837
	int rc, i;
4838 4839

	/* Make sure the interrupt is not active. */
4840
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4841 4842 4843 4844

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
4845
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4846
#endif
4847
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4848 4849 4850 4851 4852
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

4853
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4854 4855
		val |= (1 << 23);

4856 4857 4858
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
4859 4860
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

4861
	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4862

4863
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4864
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4865
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4866
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4867 4868
	}

4869
	if (bp->flags & BNX2_FLAG_PCIX) {
4870 4871 4872 4873 4874 4875 4876 4877
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

4878 4879 4880 4881
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4882 4883 4884

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
4885
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4886 4887 4888 4889
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
M
Michael Chan 已提交
4890
		bnx2_init_context(bp);
4891

4892 4893 4894
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

4895 4896
	bnx2_init_nvram(bp);

4897
	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4898

4899
	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4900 4901
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4902
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4903
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4904
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4905 4906
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}
4907

4908
	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4909 4910

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4911 4912
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4913

4914
	val = (BNX2_PAGE_BITS - 8) << 24;
4915
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4916 4917

	/* Configure page size. */
4918
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4919
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4920
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4921
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4922 4923 4924 4925 4926 4927 4928

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
4929
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4930 4931

	/* Program the MTU.  Also include 4 bytes for CRC32. */
4932 4933
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4934 4935
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4936
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4937

4938 4939 4940 4941 4942 4943 4944
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

4945
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4946 4947 4948
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

4949 4950
	bp->idle_chk_status_idx = 0xffff;

4951 4952 4953
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
4954
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4955

4956 4957 4958
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4959

4960 4961 4962 4963
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);
4964

4965 4966
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4967

4968 4969
	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4970

4971 4972
	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4973

4974
	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4975

4976
	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4977

4978 4979
	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);
4980

4981 4982
	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4983

4984
	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4985
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4986
	else
4987 4988
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4989

4990
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4991
		val = BNX2_HC_CONFIG_COLLECT_STATS;
4992
	else {
4993 4994
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
4995 4996
	}

4997
	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4998 4999
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5000

M
Michael Chan 已提交
5001 5002 5003 5004
		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5005
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
M
Michael Chan 已提交
5006

5007
	BNX2_WR(bp, BNX2_HC_CONFIG, val);
M
Michael Chan 已提交
5008

M
Michael Chan 已提交
5009 5010 5011 5012 5013
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

M
Michael Chan 已提交
5014 5015 5016 5017
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

5018
		BNX2_WR(bp, base,
5019
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
M
Michael Chan 已提交
5020
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5021 5022
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

5023
		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5024 5025 5026
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

5027
		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5028 5029
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

5030 5031
		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
M
Michael Chan 已提交
5032
			bp->rx_quick_cons_trip);
5033

5034
		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
M
Michael Chan 已提交
5035 5036
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}
5037

5038
	/* Clear internal stats counters. */
5039
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5040

5041
	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5042 5043 5044 5045

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

5046
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5047
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
M
Michael Chan 已提交
5048
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5049
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
M
Michael Chan 已提交
5050
	}
5051
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5052
			  1, 0);
5053

5054 5055
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5056 5057 5058

	udelay(20);

5059
	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
M
Michael Chan 已提交
5060

5061
	return rc;
5062 5063
}

5064 5065 5066 5067
static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
5068
	struct bnx2_tx_ring_info *txr;
5069
	struct bnx2_rx_ring_info *rxr;
5070 5071 5072 5073
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
5074
		txr = &bnapi->tx_ring;
5075
		rxr = &bnapi->rx_ring;
5076

5077 5078
		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
5079 5080 5081 5082 5083
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
5084 5085 5086
	}
}

M
Michael Chan 已提交
5087
static void
5088
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
M
Michael Chan 已提交
5089 5090
{
	u32 val, offset0, offset1, offset2, offset3;
M
Michael Chan 已提交
5091
	u32 cid_addr = GET_CID_ADDR(cid);
M
Michael Chan 已提交
5092

5093
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
M
Michael Chan 已提交
5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
M
Michael Chan 已提交
5105
	bnx2_ctx_wr(bp, cid_addr, offset0, val);
M
Michael Chan 已提交
5106 5107

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
M
Michael Chan 已提交
5108
	bnx2_ctx_wr(bp, cid_addr, offset1, val);
M
Michael Chan 已提交
5109

5110
	val = (u64) txr->tx_desc_mapping >> 32;
M
Michael Chan 已提交
5111
	bnx2_ctx_wr(bp, cid_addr, offset2, val);
M
Michael Chan 已提交
5112

5113
	val = (u64) txr->tx_desc_mapping & 0xffffffff;
M
Michael Chan 已提交
5114
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
M
Michael Chan 已提交
5115
}
5116 5117

static void
5118
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5119
{
5120
	struct bnx2_tx_bd *txbd;
5121 5122
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
5123
	struct bnx2_tx_ring_info *txr;
5124

5125 5126 5127 5128 5129 5130 5131
	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;
5132

M
Michael Chan 已提交
5133 5134
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

5135
	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5136

5137 5138
	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5139

5140 5141
	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;
5142

5143 5144
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5145

5146
	bnx2_init_tx_context(bp, cid, txr);
5147 5148 5149
}

static void
5150 5151
bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
		     u32 buf_size, int num_rings)
5152 5153
{
	int i;
5154
	struct bnx2_rx_bd *rxbd;
5155

5156
	for (i = 0; i < num_rings; i++) {
5157
		int j;
5158

5159
		rxbd = &rx_ring[i][0];
5160
		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5161
			rxbd->rx_bd_len = buf_size;
5162 5163
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
5164
		if (i == (num_rings - 1))
5165 5166 5167
			j = 0;
		else
			j = i + 1;
5168 5169
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5170
	}
5171 5172 5173
}

/* Initialize one RX ring: program the chip's L2 RX context, set up the
 * regular and (optional) jumbo page descriptor chains, pre-fill both rings
 * with buffers, and publish the initial producer indices to the hardware.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context ID; RSS rings follow. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709: arm the L2_5 mapping for this ring. */
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Clear the page-buffer size first; re-programmed below if the
	 * jumbo (page) ring is in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Physical address of the first jumbo-page descriptor page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Physical address of the first regular descriptor page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the jumbo page ring; a partial fill is tolerated with
	 * a warning (the ring just runs with fewer buffers).
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the regular data ring, same partial-fill policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Cache mailbox doorbell addresses for the fast path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}

5259 5260 5261 5262
/* Initialize every TX and RX ring and, when multiple rings are active,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while (re)initializing the TX rings. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Likewise disable RSS while the RX rings are set up. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table: each entry is a 4-bit
		 * ring index (round-robin over the non-default rings),
		 * written to the chip 8 entries (one 32-bit word) at a time.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for all IPv4/IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}

5306
/* Return the number of descriptor pages needed to hold ring_size entries,
 * rounded up to the next power of two.  max_size is the power-of-two cap
 * supplied by the caller (e.g. BNX2_MAX_RX_RINGS).
 */
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 pages = 1;
	u32 pow2 = max_size;

	/* How many pages of BNX2_MAX_RX_DESC_CNT usable entries? */
	for (; ring_size > BNX2_MAX_RX_DESC_CNT; pages++)
		ring_size -= BNX2_MAX_RX_DESC_CNT;

	/* Shift the cap down to the highest set bit of the page count. */
	while ((pow2 & pages) == 0)
		pow2 >>= 1;

	/* Not an exact power of two: round up to the next one. */
	return (pages == pow2) ? pow2 : pow2 << 1;
}

/* Compute all RX buffer/ring sizing parameters for the given ring size,
 * deciding whether the jumbo page ring is needed based on the MTU.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total allocation per buffer including alignment, skb padding
	 * and the shared-info area used by build_skb().
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* If a full frame no longer fits in one page (and the chip can do
	 * jumbo paging), switch to small header buffers plus a page ring.
	 */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* Header buffer only needs to hold the copy-threshold bytes. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}

5366 5367 5368 5369 5370
/* Release every pending TX skb on all TX rings: unmap the head and all
 * fragment DMA mappings, free the skbs, and reset the BQL queue state.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated; nothing to free on it. */
		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body: one step for an empty
		 * slot, 1 + nr_frags steps for a queued packet.
		 */
		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Unmap the linear (head) part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Then unmap each fragment, which occupies the
			 * following descriptors.
			 */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Reset byte-queue-limit accounting for this queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

/* Release all RX data buffers and RX pages on every RX ring, unmapping
 * their DMA mappings first.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this is 'return', not 'continue' — it
		 * presumably relies on rings being allocated together, so a
		 * NULL ring implies no later ring exists either; confirm.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (data == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		/* Free the jumbo page-ring buffers as well. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}

/* Free all host-side TX and RX buffers (used around chip resets). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

/* Reset the chip with the given firmware reset code, release all host
 * buffers, and bring the chip and rings back up.  Buffers are freed even
 * when the reset itself fails.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int err = bnx2_reset_chip(bp, reset_code);

	/* Free buffers unconditionally; the chip no longer owns them. */
	bnx2_free_skbs(bp);
	if (err)
		return err;

	err = bnx2_init_chip(bp);
	if (err)
		return err;

	bnx2_init_all_rings(bp);
	return 0;
}

/* Full NIC (re)initialization: reset the chip and rings, then bring up
 * the PHY and link state under phy_lock.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY setup and link evaluation must run under phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}

M
Michael Chan 已提交
5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500
/* Quiesce the chip for shutdown/suspend, choosing the firmware reset
 * code from the Wake-on-LAN configuration.  Returns the result of
 * bnx2_reset_chip().
 */
static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		code = bp->wol ? BNX2_DRV_MSG_CODE_SUSPEND_WOL :
				 BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, code);
}

5501 5502 5503 5504
/* Ethtool self-test: for each register in the table, verify that
 * writable bits (rw_mask) can be cleared and set, and that read-only
 * bits (ro_mask) are unaffected by writes.  The original register value
 * is restored in all cases.  Returns 0 on success, -ENODEV on the first
 * mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Some registers do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits
		 * must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}

/* Walk a region of on-chip memory via indirect register access, writing
 * each test pattern to every 32-bit word and reading it back.  Returns 0
 * if all words verify, -ENODEV on the first mismatch.
 */
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int pat;

	for (pat = 0; pat < sizeof(test_pattern) / 4; pat++) {
		u32 off;

		for (off = 0; off < size; off += 4) {
			bnx2_reg_wr_ind(bp, start + off, test_pattern[pat]);
			if (bnx2_reg_rd_ind(bp, start + off) !=
			    test_pattern[pat])
				return -ENODEV;
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
5700
	static struct mem_entry {
5701 5702
		u32   offset;
		u32   len;
5703
	} mem_tbl_5706[] = {
5704
		{ 0x60000,  0x4000 },
M
Michael Chan 已提交
5705
		{ 0xa0000,  0x3000 },
5706 5707 5708 5709 5710
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
5711 5712 5713 5714 5715 5716 5717 5718
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
5719
	};
5720 5721
	struct mem_entry *mem_tbl;

5722
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5723 5724 5725
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;
5726 5727 5728 5729 5730 5731 5732

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}
5733

5734 5735 5736
	return ret;
}

M
Michael Chan 已提交
5737 5738 5739
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Send one test packet through the requested loopback path (MAC or PHY)
 * and verify it is received back intact: no frame errors, correct length,
 * and a byte-exact payload match.  Returns 0 on success, -ENODEV when the
 * packet does not come back correctly, -EINVAL/-ENOMEM/-EIO on setup
 * failures.  PHY loopback is skipped (returns 0) on remote-PHY devices.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on ring 0. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo threshold so it arrives in a
	 * single RX buffer.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Build a frame: our MAC as destination, zeroed src/type, then a
	 * counting byte pattern that is checked on receive.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce event and sample the current RX consumer so we
	 * can detect exactly one new packet later.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Queue the packet as a single-BD transmit. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another coalesce so the completion indices are updated. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The transmit must have completed... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts new packets must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}

M
Michael Chan 已提交
5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

/* Ethtool loopback self-test: resets the NIC, re-initializes the PHY,
 * then runs both MAC and PHY loopback.  Returns a bitmask of the failed
 * modes (0 means both passed); both are reported failed if the device
 * is not running.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}

5897 5898 5899 5900 5901 5902
#define NVRAM_SIZE 0x200
/* CRC-32 residual: the CRC of a block whose last 4 bytes are its own
 * (little-endian) CRC always equals this constant.
 */
#define CRC32_RESIDUAL 0xdebb20e3

/* Ethtool NVRAM self-test: check the NVRAM magic signature, then verify
 * the CRC of two 0x100-byte blocks starting at offset 0x100.  Returns 0
 * on success, -ENODEV on bad magic/CRC, or the NVRAM read error.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

        magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* Each block includes its own CRC, so the running CRC must land
	 * on the standard residual value.
	 */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}

/* Ethtool link self-test: report 0 when link is up, -ENODEV otherwise.
 * For remote-PHY devices the cached link state is used; otherwise the
 * PHY's BMSR is read (twice — the status bit is latched) under phy_lock.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice: link-status is latched-low, the second read gives
	 * the current state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

/* Ethtool interrupt self-test: force a host-coalescing event and poll
 * (up to ~100 ms) for the status-block index in the interrupt-ack
 * register to advance.  Returns 0 if it changed, -ENODEV on timeout.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	BNX2_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}

5991
/* Determining link for parallel detection. */
/* Probe the 5706S SerDes PHY's shadow/debug registers to decide whether
 * a link partner without autoneg is present: signal must be detected,
 * the AN debug register must show sync and valid data (read twice to
 * clear latched bits), and no 802.3z CONFIG words may be arriving.
 * Returns 1 if a forced-speed link partner appears present, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	/* Double read to clear latched status. */
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	/* Double read to clear latched status. */
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}

6023
/* Periodic timer work for the 5706S SerDes PHY.  Implements parallel
 * detection: if autoneg fails to bring the link up but a forced-speed
 * partner is detected, force 1000/full; once forced, revert to autoneg
 * when the partner starts negotiating again.  Also monitors sync loss
 * and toggles the link state accordingly.  Runs under phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently restarted; give it time to finish. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced a link; if a forced
			 * partner looks present, force 1G/full ourselves.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was parallel-detected; if the partner is now
		 * sending autoneg pages, switch back to autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		/* Double read to clear latched status. */
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		/* On sync loss while up, first force the link down; on a
		 * subsequent loss (or once sync returns), re-evaluate.
		 */
		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6084

6085 6086 6087
/* Periodic timer work for the 5708S SerDes PHY.  When autoneg fails to
 * bring the link up on a 2.5G-capable part, alternate between forced
 * 2.5G and autoneg to find a partner.  No-op for remote-PHY devices.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a recent autoneg restart time to complete. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed: try forced 2.5G for one short
			 * timeout period.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed: fall back to autoneg and
			 * wait two timer ticks before retrying.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}

6118 6119 6120 6121
/* Main periodic driver timer: sends the firmware heartbeat, refreshes
 * the firmware RX-drop counter, applies the broken-stats workaround,
 * checks for missed MSI interrupts, and runs the SerDes state machines.
 * Re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain (non one-shot) MSI can be lost on some chips; check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

6154 6155 6156
static int
bnx2_request_irq(struct bnx2 *bp)
{
6157
	unsigned long flags;
6158 6159
	struct bnx2_irq *irq;
	int rc = 0, i;
6160

6161
	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6162 6163 6164
		flags = 0;
	else
		flags = IRQF_SHARED;
6165 6166 6167

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
6168
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6169
				 &bp->bnx2_napi[i]);
6170 6171 6172 6173
		if (rc)
			break;
		irq->requested = 1;
	}
6174 6175 6176 6177
	return rc;
}

static void
6178
__bnx2_free_irq(struct bnx2 *bp)
6179
{
6180 6181
	struct bnx2_irq *irq;
	int i;
6182

6183 6184 6185
	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
6186
			free_irq(irq->vector, &bp->bnx2_napi[i]);
6187
		irq->requested = 0;
6188
	}
6189 6190 6191 6192 6193 6194 6195
}

/* Free all IRQ vectors, then disable whichever of MSI/MSI-X was in use
 * and clear the corresponding mode flags.
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{

	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}

/* Try to switch the device to MSI-X with up to msix_vecs vectors (plus
 * one extra for CNIC when built in).  On partial availability the vector
 * count is reduced and retried.  On success, bp->irq_tbl, bp->irq_nvecs
 * and the MSI-X flags are set up; on failure the function returns with
 * the flags untouched so the caller falls back to MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	/* Reserve one extra vector for the CNIC driver. */
	total_vecs++;
#endif
	rc = -ENOSPC;
	/* pci_enable_msix() returns >0 with the number of vectors it
	 * could provide; retry with that reduced count until it either
	 * succeeds (0) or fails outright (<0).
	 */
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	/* The CNIC vector does not count toward the net rings. */
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}

6255
static int
6256 6257
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
6258
	int cpus = netif_get_num_default_rss_queues();
6259 6260 6261 6262 6263 6264 6265 6266 6267 6268
	int msix_vecs;

	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
M
Michael Chan 已提交
6269

6270 6271
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6272 6273 6274
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

6275
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
M
Michael Chan 已提交
6276
		bnx2_enable_msix(bp, msix_vecs);
6277

6278 6279
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6280
		if (pci_enable_msi(bp->pdev) == 0) {
6281
			bp->flags |= BNX2_FLAG_USING_MSI;
6282
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6283
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6284 6285 6286
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;
6287 6288

			bp->irq_tbl[0].vector = bp->pdev->irq;
6289 6290
		}
	}
B
Benjamin Li 已提交
6291

6292 6293 6294 6295 6296 6297 6298 6299 6300 6301
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

6302
	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
B
Benjamin Li 已提交
6303

6304
	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6305 6306
}

6307 6308 6309 6310
/* Called with rtnl_lock.
 *
 * ndo_open: bring the interface up.  Order matters throughout — firmware,
 * power state, interrupt mode, NAPI, memory, IRQs, then NIC init; the
 * open_err label unwinds everything in reverse.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* Firmware images are loaded at open time, not at probe. */
	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Pick INTx/MSI/MSI-X and the ring counts before allocating. */
	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces plain INTx on the retry. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind; each helper is safe to call for stages not yet done. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}

static void
D
David Howells 已提交
6392
bnx2_reset_task(struct work_struct *work)
6393
{
D
David Howells 已提交
6394
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6395
	int rc;
6396
	u16 pcicmd;
6397

6398 6399 6400
	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
6401
		return;
6402
	}
6403

6404
	bnx2_netif_stop(bp, true);
6405

6406 6407 6408 6409 6410 6411
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
6412 6413 6414 6415 6416 6417 6418 6419
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}
6420 6421

	atomic_set(&bp->intr_sem, 1);
6422
	bnx2_netif_start(bp, true);
6423
	rtnl_unlock();
6424 6425
}

6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

/* Dump the flow-through-queue control registers, the on-chip CPU states
 * and the TBDC CAM to the kernel log.  Diagnostic aid for tx timeouts.
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice — harmless (same register
		 * dumped twice) but looks unintentional; confirm. */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		/* reg+0x1c (pc) is sampled twice: differing values show the
		 * on-chip CPU is still making progress. */
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		/* Issue a CAM read for line i, then poll (bounded to 100
		 * iterations) for the command to complete. */
		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}

6495 6496 6497 6498
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
J
Jeffrey Huang 已提交
6499
	u32 val1, val2;
6500 6501 6502 6503 6504 6505 6506

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6507
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6508 6509
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6510
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6511
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6512
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6513
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6514
	if (bp->flags & BNX2_FLAG_USING_MSIX)
6515
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6516
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6517 6518
}

6519 6520 6521
static void
bnx2_tx_timeout(struct net_device *dev)
{
M
Michael Chan 已提交
6522
	struct bnx2 *bp = netdev_priv(dev);
6523

6524
	bnx2_dump_ftq(bp);
6525
	bnx2_dump_state(bp);
J
Jeffrey Huang 已提交
6526
	bnx2_dump_mcp_state(bp);
6527

6528 6529 6530 6531
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

H
Herbert Xu 已提交
6532
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hot path: map the skb head and each fragment for DMA, fill one tx BD
 * per mapping, then ring the tx mailbox.  On a fragment mapping failure
 * everything mapped so far is unwound and the skb is dropped (returning
 * NETDEV_TX_OK — the packet is consumed either way).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should never trigger: the queue is stopped before it fills. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	/* TSO: encode MSS, TCP option length and (for IPv6) the TCP header
	 * offset into the BD flags/mss fields. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset is spread across several BD bit
				 * fields in units of 8 bytes. */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		/* Nothing mapped yet; just drop the packet. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
M
Michael Chan 已提交
6714
	struct bnx2 *bp = netdev_priv(dev);
6715

6716
	bnx2_disable_int_sync(bp);
6717
	bnx2_napi_disable(bp);
6718
	netif_tx_disable(dev);
6719
	del_timer_sync(&bp->timer);
M
Michael Chan 已提交
6720
	bnx2_shutdown_chip(bp);
6721
	bnx2_free_irq(bp);
6722 6723
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
M
Michael Chan 已提交
6724
	bnx2_del_napi(bp);
6725 6726
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
6727
	bnx2_set_power_state(bp, PCI_D3hot);
6728 6729 6730
	return 0;
}

6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

6743 6744
		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6745 6746
		if (lo > 0xffffffff)
			hi++;
6747 6748
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
6749 6750 6751
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6752
		temp_stats[i] += hw_stats[i];
6753 6754
}

E
Eric Dumazet 已提交
6755 6756
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6757

M
Michael Chan 已提交
6758
#define GET_64BIT_NET_STATS(ctr)				\
6759 6760
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6761

M
Michael Chan 已提交
6762
#define GET_32BIT_NET_STATS(ctr)				\
6763 6764
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
M
Michael Chan 已提交
6765

E
Eric Dumazet 已提交
6766 6767
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6768
{
M
Michael Chan 已提交
6769
	struct bnx2 *bp = netdev_priv(dev);
6770

E
Eric Dumazet 已提交
6771
	if (bp->stats_blk == NULL)
6772
		return net_stats;
E
Eric Dumazet 已提交
6773

6774
	net_stats->rx_packets =
M
Michael Chan 已提交
6775 6776 6777
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6778 6779

	net_stats->tx_packets =
M
Michael Chan 已提交
6780 6781 6782
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6783 6784

	net_stats->rx_bytes =
M
Michael Chan 已提交
6785
		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6786 6787

	net_stats->tx_bytes =
M
Michael Chan 已提交
6788
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6789

6790
	net_stats->multicast =
6791
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6792

6793
	net_stats->collisions =
M
Michael Chan 已提交
6794
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6795

6796
	net_stats->rx_length_errors =
M
Michael Chan 已提交
6797 6798
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6799

6800
	net_stats->rx_over_errors =
M
Michael Chan 已提交
6801 6802
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6803

6804
	net_stats->rx_frame_errors =
M
Michael Chan 已提交
6805
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6806

6807
	net_stats->rx_crc_errors =
M
Michael Chan 已提交
6808
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6809 6810 6811 6812 6813 6814

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
M
Michael Chan 已提交
6815 6816
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6817

6818 6819
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6820 6821 6822
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
M
Michael Chan 已提交
6823
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6824 6825 6826
	}

	net_stats->tx_errors =
M
Michael Chan 已提交
6827
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6828 6829 6830
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

M
Michael Chan 已提交
6831
	net_stats->rx_missed_errors =
M
Michael Chan 已提交
6832 6833 6834
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);
M
Michael Chan 已提交
6835

6836 6837 6838 6839 6840 6841 6842 6843
	return net_stats;
}

/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
M
Michael Chan 已提交
6844
	struct bnx2 *bp = netdev_priv(dev);
6845
	int support_serdes = 0, support_copper = 0;
6846 6847

	cmd->supported = SUPPORTED_Autoneg;
6848
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6849 6850 6851 6852 6853 6854 6855 6856
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
6857 6858
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
6859
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6860
			cmd->supported |= SUPPORTED_2500baseX_Full;
6861 6862

	}
6863
	if (support_copper) {
6864 6865 6866 6867 6868 6869 6870 6871 6872
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

6873 6874
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
6875 6876 6877 6878
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
6879
	} else {
6880 6881 6882 6883
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
6884
		ethtool_cmd_speed_set(cmd, bp->line_speed);
6885 6886 6887
		cmd->duplex = bp->duplex;
	}
	else {
6888
		ethtool_cmd_speed_set(cmd, -1);
6889 6890
		cmd->duplex = -1;
	}
6891
	spin_unlock_bh(&bp->phy_lock);
6892 6893 6894 6895 6896 6897

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6898

6899 6900 6901
/* ethtool set_settings: validate and apply a new port/autoneg/speed
 * configuration.  All validation happens on local copies under phy_lock;
 * nothing in bp is modified until every check has passed.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing media type requires remote-PHY capability. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Mask the advertisement to what the medium supports; an
		 * empty mask falls back to advertising everything. */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only does 1G/2.5G full duplex, and 2.5G only
			 * on capable PHYs. */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			/* Gigabit speeds cannot be forced on copper. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed — commit the new configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}

static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
M
Michael Chan 已提交
6980
	struct bnx2 *bp = netdev_priv(dev);
6981

6982 6983 6984 6985
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6986 6987
}

M
Michael Chan 已提交
6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: the register dump buffer has a fixed size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

/* ethtool get_regs: copy the chip's register space into the user buffer.
 * reg_boundaries[] lists alternating (start, end) offsets of readable
 * windows; gaps between windows are left zeroed in the output.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers are only readable while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* End of this window: jump to the start of the next one and
		 * reposition the output pointer to the matching offset. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

7048 7049 7050
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
M
Michael Chan 已提交
7051
	struct bnx2 *bp = netdev_priv(dev);
7052

7053
	if (bp->flags & BNX2_FLAG_NO_WOL) {
7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
M
Michael Chan 已提交
7070
	struct bnx2 *bp = netdev_priv(dev);
7071 7072 7073 7074 7075

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
7076
		if (bp->flags & BNX2_FLAG_NO_WOL)
7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

/* ethtool nway_reset: restart autonegotiation.  For serdes PHYs the link
 * is first forced down (loopback) so the peer notices the renegotiation;
 * phy_lock is dropped around the msleep() and reacquired afterwards.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Nothing to restart when speed is forced. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the management firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the timer that polls serdes autoneg completion. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

7133 7134 7135 7136 7137 7138 7139 7140
/* ethtool get_link: report the cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

7141 7142 7143
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
M
Michael Chan 已提交
7144
	struct bnx2 *bp = netdev_priv(dev);
7145

M
Michael Chan 已提交
7146
	if (bp->flash_info == NULL)
7147 7148
		return 0;

M
Michael Chan 已提交
7149
	return (int) bp->flash_size;
7150 7151 7152 7153 7154 7155
}

/* ethtool get_eeprom: read @eeprom->len bytes of NVRAM into @eebuf.
 * NVRAM is only accessible while the chip is up.
 */
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */
	return bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}

/* ethtool set_eeprom: write @eeprom->len bytes from @eebuf to NVRAM.
 * NVRAM is only accessible while the chip is up.
 */
static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */
	return bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
M
Michael Chan 已提交
7189
	struct bnx2 *bp = netdev_priv(dev);
7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
M
Michael Chan 已提交
7211
	struct bnx2 *bp = netdev_priv(dev);
7212 7213 7214 7215

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

7216
	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7240
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7241 7242 7243
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
7244 7245 7246
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7247 7248

	if (netif_running(bp->dev)) {
7249
		bnx2_netif_stop(bp, true);
7250
		bnx2_init_nic(bp, 0);
7251
		bnx2_netif_start(bp, true);
7252 7253 7254 7255 7256 7257 7258 7259
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
M
Michael Chan 已提交
7260
	struct bnx2 *bp = netdev_priv(dev);
7261

7262 7263
	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7264 7265

	ering->rx_pending = bp->rx_ring_size;
7266
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7267

7268
	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7269 7270 7271 7272
	ering->tx_pending = bp->tx_ring_size;
}

static int
7273
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7274
{
7275
	if (netif_running(bp->dev)) {
7276 7277 7278
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

7279
		bnx2_netif_stop(bp, true);
7280
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7281 7282 7283 7284 7285 7286
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
7287 7288 7289 7290
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

7291 7292
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;
7293 7294

	if (netif_running(bp->dev)) {
7295 7296 7297 7298 7299 7300 7301 7302 7303
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);
7304

7305 7306 7307
		if (!rc)
			rc = bnx2_request_irq(bp);

7308 7309 7310 7311 7312 7313
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
7314
			return rc;
7315
		}
7316 7317 7318 7319 7320 7321 7322
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
7323
		bnx2_netif_start(bp, true);
7324 7325 7326 7327
	}
	return 0;
}

7328 7329 7330 7331 7332 7333
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

7334 7335
	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7336 7337 7338 7339
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
7340 7341
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				   false);
7342 7343 7344
	return rc;
}

7345 7346 7347
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
M
Michael Chan 已提交
7348
	struct bnx2 *bp = netdev_priv(dev);
7349 7350 7351 7352 7353 7354 7355 7356 7357

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
M
Michael Chan 已提交
7358
	struct bnx2 *bp = netdev_priv(dev);
7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

7373 7374 7375 7376 7377
	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}
7378 7379 7380 7381

	return 0;
}

7382
static struct {
7383
	char string[ETH_GSTRING_LEN];
M
Michael Chan 已提交
7384
} bnx2_stats_str_arr[] = {
7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
M
Michael Chan 已提交
7429
	{ "rx_ftq_discards" },
7430
	{ "rx_discards" },
M
Michael Chan 已提交
7431
	{ "rx_fw_discards" },
7432 7433
};

7434
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
M
Michael Chan 已提交
7435

7436 7437
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

7438
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
M
Michael Chan 已提交
7483
    STATS_OFFSET32(stat_IfInFTQDiscards),
7484
    STATS_OFFSET32(stat_IfInMBUFDiscards),
M
Michael Chan 已提交
7485
    STATS_OFFSET32(stat_FwRxDrop),
7486 7487 7488 7489
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
7490
 */
7491
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7492 7493 7494 7495
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
M
Michael Chan 已提交
7496
	4,4,4,4,4,4,4,
7497 7498
};

M
Michael Chan 已提交
7499 7500 7501 7502 7503
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
M
Michael Chan 已提交
7504
	4,4,4,4,4,4,4,
M
Michael Chan 已提交
7505 7506
};

7507 7508
#define BNX2_NUM_TESTS 6

7509
static struct {
7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
7521
bnx2_get_sset_count(struct net_device *dev, int sset)
7522
{
7523 7524 7525 7526 7527 7528 7529 7530
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
7531 7532 7533 7534 7535
}

/* ethtool .self_test: run the offline (register/memory/loopback) tests
 * when requested, then the online (nvram/interrupt/link) tests.
 * buf[i] is set non-zero for each failing test, matching
 * bnx2_tests_str_arr order, and ETH_TEST_FL_FAILED is set in flags.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests require exclusive access to the chip. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}

/* ethtool .get_strings: copy the requested string table into buf. */
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr, sizeof(bnx2_tests_str_arr));
		break;
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr, sizeof(bnx2_stats_str_arr));
		break;
	}
}

/* ethtool .get_ethtool_stats: read each counter from the hardware stats
 * block, adding the saved snapshot in temp_stats_blk (counters survive
 * chip resets that way).  Width of each counter comes from the per-chip
 * stats_len_arr table; width 0 means the counter is skipped (errata).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

static int
S
stephen hemminger 已提交
7655
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7656
{
M
Michael Chan 已提交
7657
	struct bnx2 *bp = netdev_priv(dev);
7658

S
stephen hemminger 已提交
7659 7660 7661
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bnx2_set_power_state(bp, PCI_D0);
7662

7663 7664
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7665
		return 1;	/* cycle on/off once per second */
7666

S
stephen hemminger 已提交
7667
	case ETHTOOL_ID_ON:
7668 7669 7670 7671 7672 7673
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
S
stephen hemminger 已提交
7674
		break;
7675

S
stephen hemminger 已提交
7676
	case ETHTOOL_ID_OFF:
7677
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
S
stephen hemminger 已提交
7678
		break;
7679

S
stephen hemminger 已提交
7680
	case ETHTOOL_ID_INACTIVE:
7681 7682
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
S
stephen hemminger 已提交
7683 7684 7685 7686 7687

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}
7688

7689 7690 7691
	return 0;
}

7692 7693
static netdev_features_t
bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7694 7695 7696
{
	struct bnx2 *bp = netdev_priv(dev);

7697 7698 7699 7700
	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		features |= NETIF_F_HW_VLAN_RX;

	return features;
7701 7702
}

7703
static int
7704
bnx2_set_features(struct net_device *dev, netdev_features_t features)
7705
{
7706 7707
	struct bnx2 *bp = netdev_priv(dev);

M
Michael Chan 已提交
7708
	/* TSO with VLAN tag won't work with current firmware */
7709 7710 7711 7712
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7713

7714
	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7715 7716 7717
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
7718
		dev->features = features;
7719 7720 7721
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
7722
		return 1;
7723 7724 7725
	}

	return 0;
7726 7727
}

7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775
static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}

/* ethtool .set_channels: validate and record the requested RX/TX ring
 * counts, then rebuild the rings (and IRQ setup) if the device is up.
 */
static int bnx2_set_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_limit = 1, tx_limit = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		rx_limit = RX_MAX_RINGS;
		tx_limit = TX_MAX_RINGS;
	}

	if (channels->rx_count > rx_limit || channels->tx_count > tx_limit)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}

7776
static const struct ethtool_ops bnx2_ethtool_ops = {
7777 7778 7779
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
M
Michael Chan 已提交
7780 7781
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
7782 7783 7784
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
7785
	.get_link		= bnx2_get_link,
7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
S
stephen hemminger 已提交
7797
	.set_phys_id		= bnx2_set_phys_id,
7798
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7799
	.get_sset_count		= bnx2_get_sset_count,
7800 7801
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
7802 7803 7804 7805 7806 7807
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
7808
	struct mii_ioctl_data *data = if_mii(ifr);
M
Michael Chan 已提交
7809
	struct bnx2 *bp = netdev_priv(dev);
7810 7811 7812 7813 7814 7815 7816 7817 7818 7819
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

7820
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7821 7822
			return -EOPNOTSUPP;

7823 7824 7825
		if (!netif_running(dev))
			return -EAGAIN;

7826
		spin_lock_bh(&bp->phy_lock);
7827
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7828
		spin_unlock_bh(&bp->phy_lock);
7829 7830 7831 7832 7833 7834 7835

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
7836
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7837 7838
			return -EOPNOTSUPP;

7839 7840 7841
		if (!netif_running(dev))
			return -EAGAIN;

7842
		spin_lock_bh(&bp->phy_lock);
7843
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7844
		spin_unlock_bh(&bp->phy_lock);
7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
M
Michael Chan 已提交
7860
	struct bnx2 *bp = netdev_priv(dev);
7861

7862
	if (!is_valid_ether_addr(addr->sa_data))
7863
		return -EADDRNOTAVAIL;
7864

7865 7866
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
7867
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7868 7869 7870 7871 7872 7873 7874 7875

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
M
Michael Chan 已提交
7876
	struct bnx2 *bp = netdev_priv(dev);
7877 7878 7879 7880 7881 7882

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
7883 7884
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
7885 7886
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller (netconsole): run every IRQ handler manually with
 * its vector disabled, so packets are processed without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif

B
Bill Pemberton 已提交
7904
static void
7905 7906
bnx2_get_5709_media(struct bnx2 *bp)
{
7907
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7908 7909 7910 7911 7912 7913
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7914
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7915 7916 7917 7918 7919 7920 7921 7922
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

7923
	if (bp->func == 0) {
7924 7925 7926 7927
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
7928
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7929 7930 7931 7932 7933 7934 7935
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
7936
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7937 7938 7939 7940 7941
			return;
		}
	}
}

B
Bill Pemberton 已提交
7942
static void
7943 7944 7945 7946
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

7947
	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7948 7949 7950
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

7951
		bp->flags |= BNX2_FLAG_PCIX;
7952

7953
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7990
		bp->flags |= BNX2_FLAG_PCI_32BIT;
7991 7992 7993

}

B
Bill Pemberton 已提交
7994
static void
7995 7996
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
M
Matt Carlson 已提交
7997
	int rc, i, j;
7998
	u8 *data;
M
Matt Carlson 已提交
7999
	unsigned int block_end, rosize, len;
8000

M
Michael Chan 已提交
8001 8002
#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
8003 8004 8005 8006 8007 8008
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

M
Michael Chan 已提交
8009 8010
	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
8011 8012 8013
	if (rc)
		goto vpd_done;

M
Michael Chan 已提交
8014 8015 8016 8017 8018
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
8019 8020
	}

M
Matt Carlson 已提交
8021 8022 8023
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;
8024

M
Matt Carlson 已提交
8025 8026 8027
	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;
8028

M
Matt Carlson 已提交
8029 8030
	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;
8031

M
Matt Carlson 已提交
8032 8033 8034 8035
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;
8036

M
Matt Carlson 已提交
8037
	len = pci_vpd_info_field_size(&data[j]);
8038

M
Matt Carlson 已提交
8039 8040 8041 8042
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;
8043

M
Matt Carlson 已提交
8044 8045 8046 8047
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;
8048

M
Matt Carlson 已提交
8049
	len = pci_vpd_info_field_size(&data[j]);
8050

M
Matt Carlson 已提交
8051 8052
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8053
		goto vpd_done;
M
Matt Carlson 已提交
8054 8055 8056

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';
8057 8058 8059 8060 8061

vpd_done:
	kfree(data);
}

B
Bill Pemberton 已提交
8062
static int
8063 8064 8065
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
8066
	int rc, i, j;
8067
	u32 reg;
8068
	u64 dma_mask, persist_dma_mask;
8069
	int err;
8070 8071

	SET_NETDEV_DEV(dev, &pdev->dev);
M
Michael Chan 已提交
8072
	bp = netdev_priv(dev);
8073 8074 8075 8076

	bp->flags = 0;
	bp->phy_flags = 0;

8077 8078 8079 8080 8081 8082 8083 8084
	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

8085 8086 8087
	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
8088
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8089 8090 8091 8092
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8093
		dev_err(&pdev->dev,
8094
			"Cannot find PCI device base address, aborting\n");
8095 8096 8097 8098 8099 8100
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
8101
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8102 8103 8104 8105 8106 8107 8108
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
8109
		dev_err(&pdev->dev,
8110
			"Cannot find power management capability, aborting\n");
8111 8112 8113 8114 8115 8116 8117 8118
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
M
Michael Chan 已提交
8119
	spin_lock_init(&bp->indirect_lock);
8120 8121 8122
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
D
David Howells 已提交
8123
	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8124

8125 8126
	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
8127
	if (!bp->regview) {
8128
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8129 8130 8131 8132
		rc = -ENOMEM;
		goto err_out_release;
	}

8133 8134
	bnx2_set_power_state(bp, PCI_D0);

8135 8136 8137 8138
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
8139 8140 8141
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8142

8143
	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8144

8145
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8146 8147
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8148 8149 8150
			rc = -EIO;
			goto err_out_unmap;
		}
8151
		bp->flags |= BNX2_FLAG_PCIE;
8152
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8153
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8154 8155 8156

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
8157 8158
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;
8159

8160
	} else {
M
Michael Chan 已提交
8161 8162 8163
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
8164
				"Cannot find PCIX capability, aborting\n");
M
Michael Chan 已提交
8165 8166 8167
			rc = -EIO;
			goto err_out_unmap;
		}
8168
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
M
Michael Chan 已提交
8169 8170
	}

8171 8172
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8173
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8174
			bp->flags |= BNX2_FLAG_MSIX_CAP;
8175 8176
	}

8177 8178
	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8179
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8180
			bp->flags |= BNX2_FLAG_MSI_CAP;
8181 8182
	}

8183
	/* 5708 cannot support DMA addresses > 40-bit.  */
8184
	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8185
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8186
	else
8187
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8188 8189 8190 8191 8192 8193 8194

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
8195
				"pci_set_consistent_dma_mask failed, aborting\n");
8196 8197
			goto err_out_unmap;
		}
8198
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8199
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8200 8201 8202
		goto err_out_unmap;
	}

8203
	if (!(bp->flags & BNX2_FLAG_PCIE))
8204
		bnx2_get_pci_speed(bp);
8205 8206

	/* 5706A0 may falsely detect SERR and PERR. */
8207
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8208
		reg = BNX2_RD(bp, PCI_COMMAND);
8209
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8210
		BNX2_WR(bp, PCI_COMMAND, reg);
8211
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8212
		!(bp->flags & BNX2_FLAG_PCIX)) {
8213

8214
		dev_err(&pdev->dev,
8215
			"5706 A1 can only be used in a PCIX bus, aborting\n");
8216 8217 8218 8219 8220
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

8221
	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8222

8223 8224 8225
	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

8226
	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8227
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8228
		u32 off = bp->func << 2;
8229

8230
		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8231
	} else
8232 8233
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

8234 8235 8236
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
8237
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8238 8239 8240

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8241
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8242 8243 8244 8245
		rc = -ENODEV;
		goto err_out_unmap;
	}

8246 8247 8248
	bnx2_read_vpd_fw_ver(bp);

	j = strlen(bp->fw_version);
8249
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8250
	for (i = 0; i < 3 && j < 24; i++) {
8251 8252
		u8 num, k, skip0;

8253 8254 8255 8256 8257
		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
8258 8259 8260 8261 8262 8263 8264 8265 8266 8267
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
8268
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
M
Michael Chan 已提交
8269 8270 8271 8272
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8273
		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8274 8275

		for (i = 0; i < 30; i++) {
8276
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8277 8278 8279 8280 8281
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
8282
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8283 8284 8285
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8286
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8287

8288 8289 8290
		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
8291
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8292
			reg = be32_to_cpu(reg);
8293 8294 8295 8296
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
8297

8298
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8299 8300 8301
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

8302
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8303 8304 8305 8306 8307
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

8308
	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8309
	bnx2_set_rx_ring_size(bp, 255);
8310

8311
	bp->tx_quick_cons_trip_int = 2;
8312
	bp->tx_quick_cons_trip = 20;
8313
	bp->tx_ticks_int = 18;
8314
	bp->tx_ticks = 80;
8315

8316 8317
	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
8318 8319 8320
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

8321
	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8322

8323
	bp->current_interval = BNX2_TIMER_INTERVAL;
8324

M
Michael Chan 已提交
8325 8326
	bp->phy_addr = 1;

8327
	/* Disable WOL support if we are running on a SERDES chip. */
8328
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8329
		bnx2_get_5709_media(bp);
8330
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8331
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
M
Michael Chan 已提交
8332

8333
	bp->phy_port = PORT_TP;
8334
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8335
		bp->phy_port = PORT_FIBRE;
8336
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
M
Michael Chan 已提交
8337
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8338
			bp->flags |= BNX2_FLAG_NO_WOL;
M
Michael Chan 已提交
8339 8340
			bp->wol = 0;
		}
8341
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8342 8343 8344 8345 8346 8347 8348 8349
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
M
Michael Chan 已提交
8350 8351
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8352
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
M
Michael Chan 已提交
8353
		}
8354 8355
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8356
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8357 8358 8359
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8360
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8361

8362 8363
	bnx2_init_fw_cap(bp);

8364 8365 8366
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8367
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8368
		bp->flags |= BNX2_FLAG_NO_WOL;
M
Michael Chan 已提交
8369 8370
		bp->wol = 0;
	}
M
Michael Chan 已提交
8371

8372
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

8384 8385 8386 8387 8388 8389 8390 8391
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
8392
	 * prefers to locally disable MSI rather than globally disabling it.
8393
	 */
8394
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8395 8396 8397 8398 8399 8400
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

8401 8402
			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
8403 8404 8405 8406 8407 8408 8409
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

8410
	bnx2_set_default_link(bp);
8411 8412
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

M
Michael Chan 已提交
8413
	init_timer(&bp->timer);
8414
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
M
Michael Chan 已提交
8415 8416 8417
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

8418
#ifdef BCM_CNIC
8419 8420 8421 8422
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8423
	bp->cnic_probe = bnx2_cnic_probe;
8424
#endif
8425 8426
	pci_save_state(pdev);

8427 8428 8429
	return 0;

err_out_unmap:
8430
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8431
		pci_disable_pcie_error_reporting(pdev);
8432 8433
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}
8434

8435 8436
	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;
8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

B
Bill Pemberton 已提交
8449
static char *
8450 8451 8452 8453
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

8454
	if (bp->flags & BNX2_FLAG_PCIE) {
8455 8456 8457
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
8458
		if (bp->flags & BNX2_FLAG_PCIX)
8459
			s += sprintf(s, "-X");
8460
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8461 8462 8463 8464 8465 8466 8467 8468
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

M
Michael Chan 已提交
8469 8470 8471 8472 8473 8474 8475 8476 8477 8478
/* Unregister every NAPI context that bnx2_init_napi() registered. */
static void
bnx2_del_napi(struct bnx2 *bp)
{
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++)
		netif_napi_del(&bp->bnx2_napi[vec].napi);
}

static void
8479 8480
bnx2_init_napi(struct bnx2 *bp)
{
8481
	int i;
8482

B
Benjamin Li 已提交
8483
	for (i = 0; i < bp->irq_nvecs; i++) {
8484 8485 8486 8487 8488 8489
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
8490
			poll = bnx2_poll_msix;
8491 8492

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8493 8494
		bnapi->bp = bp;
	}
8495 8496
}

8497 8498 8499 8500
/* net_device callbacks wired up for every bnx2 interface. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

B
Bill Pemberton 已提交
8515
static int
8516 8517 8518
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
8519
	struct net_device *dev;
8520
	struct bnx2 *bp;
8521
	int rc;
8522
	char str[40];
8523 8524

	if (version_printed++ == 0)
8525
		pr_info("%s", version);
8526 8527

	/* dev zeroed in init_etherdev */
B
Benjamin Li 已提交
8528
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8529 8530 8531 8532
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
8533 8534
	if (rc < 0)
		goto err_free;
8535

8536
	dev->netdev_ops = &bnx2_netdev_ops;
8537 8538 8539
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

M
Michael Chan 已提交
8540
	bp = netdev_priv(dev);
8541

8542 8543 8544 8545
	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);

8546 8547 8548 8549
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

8550
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8551 8552 8553 8554 8555
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= dev->hw_features;
8556
	dev->priv_flags |= IFF_UNICAST_FLT;
8557

8558
	if ((rc = register_netdev(dev))) {
8559
		dev_err(&pdev->dev, "Cannot register net device\n");
M
Michael Chan 已提交
8560
		goto error;
8561 8562
	}

8563 8564
	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
8565 8566
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8567 8568
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);
8569 8570

	return 0;
M
Michael Chan 已提交
8571 8572

error:
M
Michael Chan 已提交
8573
	pci_iounmap(pdev, bp->regview);
M
Michael Chan 已提交
8574 8575 8576
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
8577
err_free:
M
Michael Chan 已提交
8578 8579
	free_netdev(dev);
	return rc;
8580 8581
}

B
Bill Pemberton 已提交
8582
/* PCI remove callback: tear down in the reverse order of probe.
 * The netdev is unregistered first so the stack stops using the
 * device before the timer, work items and mappings go away.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* No new timer/reset work can be scheduled once unregistered;
	 * flush anything still in flight.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	/* Undo AER setup done at probe time, if it succeeded. */
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

/* Legacy PCI suspend callback: quiesce the device and drop it to the
 * power state chosen for @state.  Always returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop pending reset work and traffic, detach from the stack and
	 * kill the periodic timer before shutting the chip down.
	 */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

/* Legacy PCI resume callback: restore config space, bring the chip
 * back to D0 and fully re-initialize it (hardware state was lost
 * across suspend).  Always returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Restore the config space saved in bnx2_suspend(). */
	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}

W
Wendy Xiong 已提交
8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* A permanent failure cannot be recovered by a slot reset;
	 * ask the core to disconnect the device.
	 */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result;
	int err;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		/* Restore config space lost in the reset, then re-save it
		 * so a later restore sees the post-reset state.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev)) {
			bnx2_set_power_state(bp, PCI_D0);
			bnx2_init_nic(bp, 1);
		}
		result = PCI_ERS_RESULT_RECOVERED;
	}
	rtnl_unlock();

	/* Clear AER status only if AER was successfully enabled at probe. */
	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	/* Re-attach regardless; bnx2_io_error_detected() detached us. */
	netif_device_attach(dev);
	rtnl_unlock();
}

M
Michael Chan 已提交
8751
/* PCI error-recovery (AER) callbacks for this driver. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

8757
/* PCI driver description registered by bnx2_init(). */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

/* Module entry point: register the PCI driver with the core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

/* Hook the init/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);