bnx2.c 214.2 KB
Newer Older
1 2
/* bnx2.c: Broadcom NX2 network driver.
 *
M
Michael Chan 已提交
3
 * Copyright (c) 2004-2013 Broadcom Corporation
4 5 6 7 8 9 10 11
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

12
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
M
Michael Chan 已提交
13 14 15 16

#include <linux/module.h>
#include <linux/moduleparam.h>

17
#include <linux/stringify.h>
M
Michael Chan 已提交
18 19 20 21 22 23 24 25 26 27 28 29 30
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
J
Jiri Slaby 已提交
31
#include <linux/bitops.h>
M
Michael Chan 已提交
32 33 34 35
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
M
Michael Chan 已提交
36
#include <asm/page.h>
M
Michael Chan 已提交
37 38 39
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
40
#include <linux/if.h>
M
Michael Chan 已提交
41 42
#include <linux/if_vlan.h>
#include <net/ip.h>
43
#include <net/tcp.h>
M
Michael Chan 已提交
44 45 46 47
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
48
#include <linux/cache.h>
M
Michael Chan 已提交
49
#include <linux/firmware.h>
B
Benjamin Li 已提交
50
#include <linux/log2.h>
51
#include <linux/aer.h>
M
Michael Chan 已提交
52

53 54 55 56
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
57 58
#include "bnx2.h"
#include "bnx2_fw.h"
D
Denys Vlasenko 已提交
59

60
#define DRV_MODULE_NAME		"bnx2"
M
Michael Chan 已提交
61 62
#define DRV_MODULE_VERSION	"2.2.4"
#define DRV_MODULE_RELDATE	"Aug 05, 2013"
63
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
M
Michael Chan 已提交
64
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
65
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
M
Michael Chan 已提交
66 67
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
68 69 70 71 72 73

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

B
Bill Pemberton 已提交
74
static char version[] =
75 76 77
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 80
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
M
Michael Chan 已提交
81 82 83 84
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
85
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86 87 88 89 90 91 92 93 94 95 96 97

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
M
Michael Chan 已提交
98 99
	BCM5708,
	BCM5708S,
M
Michael Chan 已提交
100
	BCM5709,
101
	BCM5709S,
M
Michael Chan 已提交
102
	BCM5716,
M
Michael Chan 已提交
103
	BCM5716S,
104 105 106
} board_t;

/* indexed by board_t, above */
A
Andrew Morton 已提交
107
static struct {
108
	char *name;
B
Bill Pemberton 已提交
109
} board_info[] = {
110 111 112 113 114
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
M
Michael Chan 已提交
115 116
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
M
Michael Chan 已提交
117
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
118
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
M
Michael Chan 已提交
119
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
M
Michael Chan 已提交
120
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121 122
	};

M
Michael Chan 已提交
123
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124 125 126 127 128 129
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
M
Michael Chan 已提交
130 131
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132 133 134 135
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
M
Michael Chan 已提交
136 137
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
M
Michael Chan 已提交
138 139
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140 141
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
M
Michael Chan 已提交
142 143
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
M
Michael Chan 已提交
144
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
M
Michael Chan 已提交
145
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146 147 148
	{ 0, }
};

149
static const struct flash_spec flash_table[] =
150
{
M
Michael Chan 已提交
151 152
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
153
	/* Slow EEPROM */
154
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
M
Michael Chan 已提交
155
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156 157
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
158 159
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
160
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 162
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
163 164
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
165
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
166
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 168 169 170
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
171
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
172
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 174
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
175 176
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
177
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 179 180
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
182
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183 184 185 186
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
187
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188 189 190 191 192
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
193
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 195 196 197
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
M
Michael Chan 已提交
198
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199 200 201 202
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
203
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 205 206 207
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
208
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 210 211 212
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
M
Michael Chan 已提交
213
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 215 216 217
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
218
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 220 221 222
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
M
Michael Chan 已提交
223
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 225 226 227
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
M
Michael Chan 已提交
228
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 230 231 232
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
M
Michael Chan 已提交
233
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 235
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
236 237
};

238
static const struct flash_spec flash_5709 = {
M
Michael Chan 已提交
239 240 241 242 243 244 245 246
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

247 248
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

B
Benjamin Li 已提交
249
static void bnx2_init_napi(struct bnx2 *bp);
M
Michael Chan 已提交
250
static void bnx2_del_napi(struct bnx2 *bp);
B
Benjamin Li 已提交
251

252
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
M
Michael Chan 已提交
253
{
M
Michael Chan 已提交
254
	u32 diff;
M
Michael Chan 已提交
255

256 257
	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();
M
Michael Chan 已提交
258 259 260 261

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
262
	diff = txr->tx_prod - txr->tx_cons;
263
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
M
Michael Chan 已提交
264
		diff &= 0xffff;
265 266
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
M
Michael Chan 已提交
267
	}
268
	return bp->tx_ring_size - diff;
M
Michael Chan 已提交
269 270
}

271 272 273
/* Read a device register through the PCI config window.  Access to
 * the shared window registers is serialized with indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

/* Write a device register through the PCI config window. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

292 293 294 295 296 297 298 299 300
/* Shared-memory (bootcode mailbox) accessors; offsets are relative to
 * the shmem_base discovered at probe time.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

304 305 306 307
/* Write one word of on-chip context memory.  The 5709 uses the
 * CTX_CTRL interface and must be polled until the write request bit
 * clears; older chips take a simple address/data register pair.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to ~25 usec for the chip to accept the write. */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389
#ifdef BCM_CNIC
/* Control hook exported to the cnic driver: gives it indirect
 * register and context-memory access.  Returns 0 on success or
 * -EINVAL for an unrecognized command.
 */
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Describe to the cnic driver which vector and status block it owns.
 * With MSI-X, cnic gets the vector just past the ones this driver
 * uses; otherwise it shares status block 0 with the default vector.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

390 391 392
	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409
	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

410
	mutex_lock(&bp->cnic_lock);
411 412
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
413
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
414
	mutex_unlock(&bp->cnic_lock);
415 416 417 418
	synchronize_rcu();
	return 0;
}

S
stephen hemminger 已提交
419
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 421 422 423
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

424 425 426
	if (!cp->max_iscsi_conn)
		return NULL;

427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443
	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

444
	mutex_lock(&bp->cnic_lock);
445 446
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
447 448 449 450
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
451
	mutex_unlock(&bp->cnic_lock);
452 453 454 455 456 457 458 459
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

460
	mutex_lock(&bp->cnic_lock);
461 462
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
463 464 465 466 467 468 469 470 471
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
472
	mutex_unlock(&bp->cnic_lock);
473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488
}

#else

/* CONFIG_CNIC disabled: the cnic notifications become no-ops. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

489 490 491 492 493 494
/* Read a PHY register over MDIO.  If the EMAC is auto-polling the
 * PHY, polling is suspended around the access and restored after.
 * Stores the register value in *val; returns 0 or -EBUSY on timeout.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Wait up to ~500 usec for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

/* Write a PHY register over MDIO.  Auto-polling is suspended around
 * the access like in bnx2_read_phy().  Returns 0 on success or
 * -EBUSY if the transaction does not complete within ~500 usec.
 *
 * Cleanup: the -EBUSY assignment used mixed space/tab indentation and
 * the error branch used non-kernel brace placement; both fixed with
 * no behavior change.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Wait up to ~500 usec for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
598 599 600 601 602
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
603
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604 605
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
606
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 608 609 610 611
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
612 613
	int i;
	struct bnx2_napi *bnapi;
614

615 616
	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
617

618 619 620 621
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);
622

623 624 625
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
626
	}
627
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
628 629 630 631 632
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
633 634
	int i;

635
	atomic_inc(&bp->intr_sem);
636 637 638
	if (!netif_running(bp->dev))
		return;

639
	bnx2_disable_int(bp);
640 641
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
642 643
}

644 645 646
/* Disable NAPI polling on every vector. */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

/* Enable NAPI polling on every vector. */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

662
static void
663
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
664
{
665 666
	if (stop_cnic)
		bnx2_cnic_stop(bp);
667
	if (netif_running(bp->dev)) {
668
		bnx2_napi_disable(bp);
669 670
		netif_tx_disable(bp->dev);
	}
671
	bnx2_disable_int_sync(bp);
672
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
673 674 675
}

static void
676
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
677 678 679
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
B
Benjamin Li 已提交
680
			netif_tx_wake_all_queues(bp->dev);
681 682 683 684
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
685
			bnx2_napi_enable(bp);
686
			bnx2_enable_int(bp);
687 688
			if (start_cnic)
				bnx2_cnic_start(bp);
689 690 691 692
		}
	}
}

693 694 695 696 697 698 699 700 701 702
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
703 704 705
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
706 707 708 709 710 711 712
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

713 714 715 716 717 718 719 720 721 722 723 724
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
725 726 727
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
728 729
			rxr->rx_desc_ring[j] = NULL;
		}
730
		vfree(rxr->rx_buf_ring);
731 732 733 734
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
735 736 737
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
738
			rxr->rx_pg_desc_ring[j] = NULL;
739
		}
740
		vfree(rxr->rx_pg_ring);
741 742 743 744
		rxr->rx_pg_ring = NULL;
	}
}

745 746 747 748 749 750 751 752 753 754 755 756 757 758
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
759 760
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
761 762 763 764 765 766
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

767 768 769 770 771 772 773 774 775 776 777
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
E
Eric Dumazet 已提交
778
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779 780 781 782 783
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
784 785 786 787
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
788 789 790 791 792 793
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
E
Eric Dumazet 已提交
794
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
795 796 797 798 799 800 801 802
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
803 804 805 806
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
807 808 809 810 811 812 813 814
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

815 816 817
static void
bnx2_free_mem(struct bnx2 *bp)
{
818
	int i;
819
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
820

821
	bnx2_free_tx_mem(bp);
822
	bnx2_free_rx_mem(bp);
823

M
Michael Chan 已提交
824 825
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
826
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
827 828
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
M
Michael Chan 已提交
829 830 831
			bp->ctx_blk[i] = NULL;
		}
	}
832
	if (bnapi->status_blk.msi) {
833 834 835
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
836
		bnapi->status_blk.msi = NULL;
837
		bp->stats_blk = NULL;
838 839 840 841 842 843
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
844
	int i, status_blk_size, err;
845 846
	struct bnx2_napi *bnapi;
	void *status_blk;
847

848 849
	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
850
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
851 852
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
853 854 855
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

856
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 858
					&bp->status_blk_mapping,
					GFP_KERNEL | __GFP_ZERO);
859
	if (status_blk == NULL)
860 861
		goto alloc_mem_err;

862 863 864 865 866 867
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
868
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
869
		for (i = 1; i < bp->irq_nvecs; i++) {
870 871 872
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];
873

874
			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
875 876 877 878 879
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
880 881 882
			bnapi->int_num = i << 24;
		}
	}
883

884
	bp->stats_blk = status_blk + status_blk_size;
885

886
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
887

888
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
889
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
M
Michael Chan 已提交
890 891 892
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
893
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
894
						BNX2_PAGE_SIZE,
895 896
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
M
Michael Chan 已提交
897 898 899 900
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
901

902 903 904 905
	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

906 907 908 909
	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

910 911 912 913 914 915 916
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

917 918 919 920 921
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

922
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
923 924
		return;

925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959
	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

960 961
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962 963

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
964
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
965 966 967 968 969 970 971 972
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

973
	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
974 975
}

M
Michael Chan 已提交
976 977 978
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
979
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
980
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981
		 "Copper");
M
Michael Chan 已提交
982 983
}

984 985 986 987 988
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
989 990 991 992
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");
993 994 995

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
996
				pr_cont(", receive ");
997
				if (bp->flow_ctrl & FLOW_CTRL_TX)
998
					pr_cont("& transmit ");
999 1000
			}
			else {
1001
				pr_cont(", transmit ");
1002
			}
1003
			pr_cont("flow control ON");
1004
		}
1005 1006
		pr_cont("\n");
	} else {
1007
		netif_carrier_off(bp->dev);
1008 1009
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
1010
	}
1011 1012

	bnx2_report_fw_link(bp);
1013 1014 1015 1016 1017 1018 1019 1020
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
1021
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

1034
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1035
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
M
Michael Chan 已提交
1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

1046 1047
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1048

1049
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
/* Record speed/duplex for a link-up event on the 5709 SerDes PHY. */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 gp_status;

	bp->link_up = 1;

	/* GP_STATUS lives in a separate register bank; switch in and out. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &gp_status);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested settings directly. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	switch (gp_status & MII_BNX2_GP_TOP_AN_SPEED_MSK) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}

	bp->duplex = (gp_status & MII_BNX2_GP_TOP_AN_FD) ?
		DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}

1130
static int
M
Michael Chan 已提交
1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
1161 1162 1163 1164 1165 1166
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

1167
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

1179 1180
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

1201
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
1218 1219
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

1262
static void
1263
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1264
{
1265
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1266 1267 1268 1269 1270

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

M
Michael Chan 已提交
1271 1272
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1273 1274 1275 1276

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289
/* Reinitialize every RX ring context.  Ring 0 uses RX_CID; the RSS
 * rings use consecutive CIDs starting at RX_RSS_CID.
 */
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	u32 cid = RX_CID;
	int ring;

	for (ring = 0; ring < bp->num_rx_rings; ring++, cid++) {
		if (ring == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

1290
static void
1291 1292 1293 1294
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

1295
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1296 1297
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
1298
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1299 1300 1301
	}

	/* Configure the EMAC mode register. */
1302
	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1303 1304

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
M
Michael Chan 已提交
1305
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
M
Michael Chan 已提交
1306
		BNX2_EMAC_MODE_25G_MODE);
1307 1308

	if (bp->link_up) {
M
Michael Chan 已提交
1309 1310
		switch (bp->line_speed) {
			case SPEED_10:
1311
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
M
Michael Chan 已提交
1312
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
M
Michael Chan 已提交
1313 1314 1315 1316 1317 1318 1319
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
M
Michael Chan 已提交
1320
				val |= BNX2_EMAC_MODE_25G_MODE;
M
Michael Chan 已提交
1321 1322 1323 1324 1325
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
1326 1327 1328 1329 1330 1331 1332 1333
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1334
	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1335 1336 1337 1338 1339 1340

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1341
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1342 1343

	/* Enable/disable tx PAUSE. */
1344
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1345 1346 1347 1348
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1349
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1350 1351

	/* Acknowledge the interrupt. */
1352
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1353

M
Michael Chan 已提交
1354
	bnx2_init_all_rx_contexts(bp);
1355 1356
}

1357 1358 1359
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
1360
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1361
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1362 1363 1364 1365 1366 1367 1368
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
1369
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1370
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1371 1372 1373 1374
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

1375 1376 1377 1378 1379 1380
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

1381
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1382 1383 1384 1385 1386
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

1387
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1388 1389
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

1390 1391 1392 1393 1394 1395 1396
	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

1397
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1398 1399 1400
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

1401 1402 1403 1404 1405 1406 1407 1408 1409
	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

1410
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1411 1412
		return 0;

1413
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1414 1415
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

1416 1417 1418 1419 1420 1421 1422
	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

1423
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1424 1425 1426
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

1427 1428 1429 1430 1431 1432
	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
1433 1434
	u32 uninitialized_var(bmcr);
	int err;
1435

1436
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1437 1438
		return;

1439
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1440 1441 1442 1443
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1444 1445 1446 1447 1448 1449
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}
1450 1451 1452

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1453
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1454

1455
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1456 1457 1458
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
E
Eric Dumazet 已提交
1459 1460
	} else {
		return;
1461 1462
	}

1463 1464 1465
	if (err)
		return;

1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
1477 1478
	u32 uninitialized_var(bmcr);
	int err;
1479

1480
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1481 1482
		return;

1483
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1484 1485 1486 1487
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1488 1489 1490 1491
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}
1492 1493 1494

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1495
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1496

1497
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1498 1499 1500
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
E
Eric Dumazet 已提交
1501 1502
	} else {
		return;
1503 1504
	}

1505 1506 1507
	if (err)
		return;

1508 1509 1510 1511 1512
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
/* Toggle the forced link-down condition on the 5706 SerDes via the
 * expansion SERDES_CTL register: @start clears the force bits,
 * otherwise they are set.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 ctl;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &ctl);
	bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT,
		       start ? (ctl & 0xff0f) : (ctl | 0xc0));
}

1526 1527 1528 1529 1530 1531
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

M
Michael Chan 已提交
1532
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1533 1534 1535 1536
		bp->link_up = 1;
		return 0;
	}

1537
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1538 1539
		return 0;

1540 1541
	link_up = bp->link_up;

1542 1543 1544 1545
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
1546

1547
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1548
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1549
		u32 val, an_dbg;
1550

1551
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1552
			bnx2_5706s_force_link_dn(bp, 0);
1553
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1554
		}
1555
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1556 1557 1558 1559 1560 1561 1562

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1563 1564 1565 1566 1567 1568 1569 1570
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

1571
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1572
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
M
Michael Chan 已提交
1573
				bnx2_5706s_linkup(bp);
1574
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
M
Michael Chan 已提交
1575
				bnx2_5708s_linkup(bp);
1576
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1577
				bnx2_5709s_linkup(bp);
1578 1579 1580 1581 1582 1583 1584
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
1585
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1586 1587
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);
1588

1589
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1590 1591 1592 1593 1594 1595
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

1596
			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1597
		}
1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

1616
        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1617 1618 1619 1620 1621

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

1622
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

1642
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1643 1644 1645 1646 1647 1648 1649
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1650
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651 1652 1653 1654 1655 1656 1657
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1658
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659 1660 1661 1662 1663 1664 1665 1666 1667
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

1668
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1669

1670
static int
1671
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1672 1673
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1713
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1714 1715 1716 1717 1718 1719
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

1720
	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1721 1722

	spin_unlock_bh(&bp->phy_lock);
1723
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1724 1725 1726 1727 1728 1729 1730
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

/* Configure a SerDes PHY for the requested link settings: either force
 * speed/duplex (autoneg off), or program the advertisement and restart
 * autonegotiation.  Remote-PHY devices are delegated to firmware.
 * Called with phy_lock held; the lock is dropped briefly when forcing
 * the link down.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Adjust 2.5G advertisement to match the forced speed;
		 * if it had to change, the link must be bounced.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific 2.5G force mechanism. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
1847
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1848 1849
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)
1850 1851 1852 1853 1854 1855 1856 1857

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1858

1859 1860
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

1861 1862 1863 1864 1865 1866
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
1867
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1868
	else
1869
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

1908 1909 1910
static void
bnx2_set_default_link(struct bnx2 *bp)
{
1911 1912 1913 1914
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}
1915

1916 1917
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
1918
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1919 1920 1921 1922
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

1923
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1924 1925 1926 1927 1928 1929 1930 1931 1932 1933
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

M
Michael Chan 已提交
1934 1935 1936 1937 1938 1939 1940 1941 1942
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1943 1944
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
M
Michael Chan 已提交
1945 1946 1947
	spin_unlock(&bp->indirect_lock);
}

1948 1949 1950 1951 1952 1953 1954
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

1955
	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1956

M
Michael Chan 已提交
1957 1958 1959 1960 1961
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972
	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
M
Michael Chan 已提交
1973
				/* fall through */
1974 1975 1976 1977 1978
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
M
Michael Chan 已提交
1979
				/* fall through */
1980 1981 1982 1983 1984 1985
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
M
Michael Chan 已提交
1986
				/* fall through */
1987 1988 1989 1990 1991
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
M
Michael Chan 已提交
1992
				/* fall through */
1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

2034
	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035 2036 2037 2038 2039 2040
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
M
Michael Chan 已提交
2041
			bnx2_send_heart_beat(bp);
2042 2043 2044 2045 2046
			break;
	}
	return 0;
}

2047 2048
/* Configure the copper PHY according to bp->autoneg and bp->req_*
 * settings: either (re)start autonegotiation with the requested
 * advertisement, or force speed/duplex through BMCR.
 * Caller holds bp->phy_lock; the lock is dropped around the 50 ms
 * forced-link-down delay (see the sparse annotations below).
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv = 0;
		u32 new_adv1000 = 0;

		/* Current 10/100 advertisement plus pause bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		/* Only restart autoneg when the advertisement actually
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced mode: build the desired BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down events; read twice so the second
		 * read reflects the current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
2138
bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 2140
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
2141 2142 2143 2144
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

2145
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146
		return bnx2_setup_serdes_phy(bp, port);
2147 2148
	}
	else {
2149
		return bnx2_setup_copper_phy(bp);
2150 2151 2152
	}
}

2153
/* Initialize the 5709 SerDes PHY.  This PHY is block-addressed: the
 * IEEE-compatible register set lives at an offset of 0x10, so the
 * cached mii_* register numbers are remapped first.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route MDIO accesses through the AER block to the AN MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the chip supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

2203
/* Initialize the 5708 SerDes PHY: force fiber mode, enable PLL early
 * link detect, optionally advertise 2.5G, and apply board-specific TX
 * amplitude tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a stronger TX signal. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from NVRAM shared config;
	 * only applied on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

/* Initialize the 5706 SerDes PHY.  Registers 0x18/0x1c are written
 * with vendor-specified values that differ for jumbo (mtu > 1500)
 * versus standard frames; presumably part of Broadcom's documented
 * init sequence -- do not change without hardware documentation.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

/* Initialize the copper PHY: apply chip-specific DSP workarounds,
 * set extended packet length for jumbo frames, and enable
 * ethernet@wirespeed (link at reduced speed over marginal cabling).
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor DSP write sequence working around a CRC-error erratum;
	 * raw register/value pairs per Broadcom -- do not modify.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear the early-DAC bit in DSP expansion register 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}


/* (Re)initialize the PHY: reset interrupt-mode flags, set the default
 * MII register map (remapped by chip-specific init for the 5709S),
 * read the PHY ID, run the chip-specific init routine, and finally
 * apply the link configuration via bnx2_setup_phy().
 * Skips local probing entirely when the firmware owns the PHY.
 * Caller holds bp->phy_lock; bnx2_setup_phy() may drop and reacquire
 * it.  Returns 0 or an error from the init/setup helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE register map; 5709S init remaps these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware-managed PHY: nothing to probe or init locally. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

2403
	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2404 2405
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407 2408 2409 2410
	bp->link_up = 1;
	return 0;
}

M
Michael Chan 已提交
2411 2412 2413 2414 2415 2416 2417 2418 2419
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1000/full (used by the loopback
 * self-test): write BMCR under phy_lock, poll up to 1 s for the link
 * to settle, then force the EMAC port mode to GMII.
 * Returns 0 on success or the bnx2_write_phy() error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the link up to 10 x 100 ms to come up in loopback. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

J
Jeffrey Huang 已提交
2443 2444 2445 2446 2447 2448 2449
/* Dump management CPU (MCP) registers and shared-memory state to the
 * kernel log for post-mortem debugging (e.g. after a firmware sync
 * timeout).  The MCP state register offsets differ on the 5709.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice deliberately -- presumably two
	 * samples show whether the MCP is advancing or stuck (NOTE(review):
	 * intent inferred; the duplicate read is in the original code).
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}

2486
/* Post a command to the bootcode via the DRV_MB shared-memory mailbox.
 * The command is tagged with an incrementing sequence number; when
 * @ack is set, poll up to BNX2_FW_ACK_TIME_OUT_MS for the firmware to
 * echo that sequence number back in FW_MB.
 * Returns 0 on success (or for WAIT0 commands, which are fire-and-
 * forget once posted), -EBUSY on ack timeout (after informing the
 * firmware), or -EIO if the firmware reports a non-OK status.
 * @silent suppresses the timeout error log and MCP state dump.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;	/* tag command with new sequence # */

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}

M
Michael Chan 已提交
2532 2533 2534 2535 2536 2537 2538
/* Enable and initialize the 5709's host-memory-backed context memory:
 * kick off the chip's MEM_INIT, wait for it to finish, then zero each
 * pre-allocated context page and program it into the chip's host page
 * table, waiting for each write request to be consumed.
 * Returns 0 on success, -EBUSY on a hardware timeout, or -ENOMEM if a
 * context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;	/* encoded page size */
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program one page-table entry (low DMA address + valid
		 * bit, high DMA address, then the write request).
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

2580 2581 2582 2583 2584 2585 2586 2587
/* Zero the on-chip context memory for all 96 connection IDs on chips
 * that keep context on-chip (pre-5709).  The 5706 A0 needs certain
 * CIDs remapped into a spare region (its context-memory quirk).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				/* remap into the spare 0x60+ CID range */
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
	    		vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A CID's context may span several physical context pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}

static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2631
	if (good_mbuf == NULL)
2632 2633
		return -ENOMEM;

2634
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2635 2636 2637 2638 2639
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
2640
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2642 2643
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);
2644

2645
		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2646 2647 2648 2649 2650 2651 2652 2653 2654

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

2655
		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2656 2657 2658 2659 2660 2661 2662 2663 2664 2665
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

2666
		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2667 2668 2669 2670 2671 2672
	}
	kfree(good_mbuf);
	return 0;
}

static void
2673
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2674 2675 2676 2677 2678
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

2679
	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2680

2681
	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2682 2683
		(mac_addr[4] << 8) | mac_addr[5];

2684
	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2685 2686
}

2687
/* Allocate and DMA-map one page for slot @index of the RX page
 * (jumbo-frame) ring and point the matching hardware BD at it.
 * Returns 0 on success, -ENOMEM if the page allocation fails, or
 * -EIO if the DMA mapping fails (the page is freed).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* Publish the 64-bit DMA address in the BD, split hi/lo. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}

/* Unmap and free the page attached to slot @index of the RX page
 * ring, if any, and clear the slot.
 */
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
		       PAGE_SIZE, PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}

2728
/* Allocate and DMA-map a new RX data buffer for ring slot @index and
 * publish its DMA address in the hardware BD.  The mapped region
 * starts at the on-board l2_fhdr within the buffer.  Advances
 * rx_prod_bseq on success.
 * Returns 0 on success, -ENOMEM on allocation failure, or -EIO if
 * the DMA mapping fails (the buffer is freed).
 */
static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

2761
static int
2762
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2763
{
2764
	struct status_block *sblk = bnapi->status_blk.msi;
2765
	u32 new_link_state, old_link_state;
2766
	int is_set = 1;
2767

2768 2769
	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
2770
	if (new_link_state != old_link_state) {
2771
		if (new_link_state)
2772
			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2773
		else
2774
			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2775 2776 2777 2778 2779 2780 2781
	} else
		is_set = 0;

	return is_set;
}

/* Service PHY-related attention bits from the status block under
 * phy_lock: a link-state change and/or a firmware timer/remote-PHY
 * event.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);
}

2795
/* Read the hardware TX consumer index from the status block.  The
 * hardware skips the last descriptor of each ring page, so an index
 * landing exactly on that boundary is advanced past it.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}

M
Michael Chan 已提交
2809 2810
/* Reclaim completed TX descriptors for this NAPI instance's ring:
 * unmap and free up to @budget transmitted skbs, report completions
 * for BQL, then wake the TX queue if it was stopped and enough
 * descriptors are now available.  Returns the number of packets
 * reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until every BD of the GSO packet
			 * (head + frags + 1) has completed; the +1 below
			 * accounts for the skipped page-boundary BD.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-sample the hardware index before giving up. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue under the TX lock, re-checking both conditions
	 * to close the race with a concurrent bnx2_start_xmit().
	 */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}

2904
/* Recycle @count page-ring entries by moving each page and its DMA
 * mapping from the consumer position back to the producer position
 * (no data copy).  If @skb is non-NULL, its last fragment page is
 * first detached and returned to the ring, and the skb is freed --
 * this handles the caller failing to allocate a replacement page.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move page, mapping and BD address when the slots differ. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}

2962
/* Recycle an RX data buffer: hand the (already prefix-synced) DMA
 * area back to the device and move buffer + mapping + BD address from
 * the consumer slot to the producer slot.  The copy steps are skipped
 * when both indices point at the same slot.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Only the inspected header prefix needs to be given back. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}

2992 2993
/* Turn a completed RX buffer into an sk_buff.  A replacement buffer
 * is allocated for the ring slot first; on failure everything (the
 * data buffer and, for split packets, the page-ring pages) is
 * recycled and NULL is returned.  For "split header" (jumbo) packets,
 * @hdr_len bytes live in the data buffer and the remainder of the
 * frame is gathered from the page ring into skb fragments.
 * @ring_idx packs consumer index (high 16 bits) and producer index
 * (low 16 bits).  The lengths include a 4-byte trailing CRC that is
 * trimmed from the final fragment.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Replenish the slot before consuming the old buffer. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		if (hdr_len) {
			/* Split packet: also recycle the page-ring pages
			 * that held the non-header part (len + 4 = frame
			 * plus CRC).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip past the l2_fhdr and padding to the frame proper. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* This fragment holds only (part of) the
				 * CRC: trim it and recycle the remaining
				 * pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip CRC from last frag */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}

M
Michael Chan 已提交
3097
/* Return the latest hardware RX consumer index from the status block.
 * The last entry of each ring page is unusable, so when the raw index
 * lands on it, skip ahead by one.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}

3111
/* Service completed RX descriptors for one NAPI context, up to @budget
 * packets.  Small frames are copied into a freshly allocated skb and the
 * original data buffer is recycled; larger frames go through bnx2_rx_skb()
 * which may attach page-ring fragments.  Returns the number of packets
 * delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		/* Take ownership of the data buffer for this descriptor. */
		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the header area is synced here; the copy path below
		 * never reads beyond BNX2_RX_COPY_THRESH.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Warm the cache for the next descriptor's header. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Split or jumbo frames continue in the page ring; hdr_len is
		 * the portion held in the normal buffer.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Errored frame: recycle the buffer (and any pages). */
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* strip the hardware-appended CRC */

		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			/* Data buffer was only copied from; put it back. */
			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN header. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts while NAPI polling runs. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}

3296 3297 3298
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so unlike
 * bnx2_msi() no explicit mask write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}

3313
/* INTx ISR.  Must distinguish our interrupt from other devices sharing
 * the line, and deassert the line before returning.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}

M
Michael Chan 已提交
3352
static inline int
3353
bnx2_has_fast_work(struct bnx2_napi *bnapi)
M
Michael Chan 已提交
3354
{
3355
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3356
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
M
Michael Chan 已提交
3357

3358
	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3359
	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
M
Michael Chan 已提交
3360
		return 1;
3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373
	return 0;
}

/* Attention bits that require servicing (link change / timer abort). */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return 1 if any work is pending for this NAPI context: fast-path
 * RX/TX completions, CNIC events, or unacknowledged attention bits.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* Attention event pending if bits and their acks disagree. */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}

3387 3388 3389 3390 3391 3392 3393
/* Work around lost MSI interrupts: if work has been pending across two
 * idle checks with no status index movement, pulse the MSI enable bit
 * and service the interrupt by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on, then run the ISR. */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}

3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425
#ifdef BCM_CNIC
/* Hand CNIC (iSCSI/RDMA offload) events to the registered cnic driver.
 * cnic_ops is RCU-protected; the handler returns the status index tag
 * used by bnx2_has_work() to detect new CNIC work.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif

3426
/* Service pending link/attention events from NAPI context. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}

/* Run one round of TX then RX completion processing for this NAPI
 * context.  TX reclaim is unbudgeted; RX consumes the remaining budget.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->tx_ring.hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_ring.rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}

3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477
/* NAPI poll handler for MSI-X vectors (fast-path work only; link and
 * CNIC events are handled on vector 0 by bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Re-enable this vector's interrupt and ack the
			 * processed status index.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}

3488 3489
/* NAPI poll handler for INTx/MSI (and MSI-X vector 0): services link
 * events, fast-path completions, and CNIC work, then re-enables the
 * interrupt when done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the interrupt still masked first,
			 * then unmask with a second write.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}

H
Herbert Xu 已提交
3537
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC RX mode (promiscuous / VLAN stripping), the
 * multicast hash registers and the unicast match filters from the
 * netdev's current flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in the frame when hw stripping is off. */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 filter bits. */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many secondary unicast addresses for exact match filters:
	 * fall back to promiscuous.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Apply the new sort mode: disable, program, enable. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}

3626
static int
M
Michael Chan 已提交
3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641
check_fw_section(const struct firmware *fw,
		 const struct bnx2_fw_file_section *section,
		 u32 alignment, bool non_empty)
{
	u32 offset = be32_to_cpu(section->offset);
	u32 len = be32_to_cpu(section->len);

	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
		return -EINVAL;
	if ((non_empty && len == 0) || len > fw->size - offset ||
	    len & (alignment - 1))
		return -EINVAL;
	return 0;
}

3642
static int
M
Michael Chan 已提交
3643 3644 3645 3646 3647 3648 3649 3650 3651 3652
check_mips_fw_entry(const struct firmware *fw,
		    const struct bnx2_mips_fw_file_entry *entry)
{
	if (check_fw_section(fw, &entry->text, 4, true) ||
	    check_fw_section(fw, &entry->data, 4, false) ||
	    check_fw_section(fw, &entry->rodata, 4, false))
		return -EINVAL;
	return 0;
}

3653 3654 3655 3656 3657 3658 3659 3660 3661 3662
/* Release both firmware images.  rv2p_firmware is only set once both
 * requests have succeeded, so it doubles as the "firmware held" flag;
 * when it is set the mips image is guaranteed to be held as well.
 */
static void bnx2_release_firmware(struct bnx2 *bp)
{
	if (bp->rv2p_firmware) {
		release_firmware(bp->mips_firmware);
		release_firmware(bp->rv2p_firmware);
		bp->rv2p_firmware = NULL;
	}
}

/* Load and sanity-check the MIPS and RV2P firmware images for this
 * chip revision.  On success both images stay held in bp; on any
 * failure everything is released and an errno is returned.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick image names by chip; 5709 A0/A1 need a special RV2P image. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	/* Validate every CPU entry of the MIPS image. */
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	/* Validate both RV2P processor sections (8-byte aligned). */
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}

/* Request firmware images unless they are already cached in bp
 * (rv2p_firmware non-NULL implies both images are held).
 */
static int bnx2_request_firmware(struct bnx2 *bp)
{
	if (bp->rv2p_firmware)
		return 0;
	return bnx2_request_uncached_firmware(bp);
}

/* Patch an RV2P instruction word at fixup slot @idx.  Only the page
 * size fixup is currently defined; all other slots pass through
 * unchanged.  @rv2p_proc and @loc are unused but kept for the fixup
 * callback signature.
 */
static u32
rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
{
	if (idx == RV2P_P1_FIXUP_PAGE_SIZE_IDX) {
		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
		rv2p_code |= RV2P_BD_PAGE_SIZE;
	}
	return rv2p_code;
}

/* Download one RV2P processor image (PROC1 or PROC2), apply the fixup
 * table, and leave the processor in reset (un-stalled later).  Each
 * instruction is 8 bytes, written via the INSTR_HIGH/LOW registers.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write each 8-byte instruction, then commit it at index i/8. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Re-write the instructions named by the fixup table, patching
	 * the low word through rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}

3799
static int
M
Michael Chan 已提交
3800 3801
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
	    const struct bnx2_mips_fw_file_entry *fw_entry)
3802
{
M
Michael Chan 已提交
3803 3804
	u32 addr, len, file_offset;
	__be32 *data;
3805 3806 3807 3808
	u32 offset;
	u32 val;

	/* Halt the CPU. */
3809
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3810
	val |= cpu_reg->mode_value_halt;
3811 3812
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813 3814

	/* Load the Text area. */
M
Michael Chan 已提交
3815 3816 3817 3818
	addr = be32_to_cpu(fw_entry->text.addr);
	len = be32_to_cpu(fw_entry->text.len);
	file_offset = be32_to_cpu(fw_entry->text.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);
M
Michael Chan 已提交
3819

M
Michael Chan 已提交
3820 3821
	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
3822 3823
		int j;

M
Michael Chan 已提交
3824 3825
		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826 3827
	}

M
Michael Chan 已提交
3828 3829 3830 3831 3832
	/* Load the Data area. */
	addr = be32_to_cpu(fw_entry->data.addr);
	len = be32_to_cpu(fw_entry->data.len);
	file_offset = be32_to_cpu(fw_entry->data.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833

M
Michael Chan 已提交
3834 3835
	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
3836 3837
		int j;

M
Michael Chan 已提交
3838 3839
		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3840 3841 3842
	}

	/* Load the Read-Only area. */
M
Michael Chan 已提交
3843 3844 3845 3846 3847 3848 3849
	addr = be32_to_cpu(fw_entry->rodata.addr);
	len = be32_to_cpu(fw_entry->rodata.len);
	file_offset = be32_to_cpu(fw_entry->rodata.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
3850 3851
		int j;

M
Michael Chan 已提交
3852 3853
		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3854 3855 3856
	}

	/* Clear the pre-fetch instruction. */
3857
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
M
Michael Chan 已提交
3858 3859 3860

	val = be32_to_cpu(fw_entry->start_addr);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861 3862

	/* Start the CPU. */
3863
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3864
	val &= ~cpu_reg->mode_value_halt;
3865 3866
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3867 3868

	return 0;
3869 3870
}

3871
/* Download firmware into all on-chip processors (RV2P plus the five
 * MIPS CPUs).  Returns 0 or the error from the first failing load.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}

3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990
/* Configure the MAC for Wake-on-LAN (magic/ACPI packet reception) and
 * notify the bootcode of the chosen suspend mode.  When WoL is off only
 * the "suspend, no WoL" message is sent.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Temporarily force 10/100 autoneg on copper so the link
		 * comes up at a speed the PHY can maintain in low power;
		 * original settings are restored below.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	/* Tell the bootcode which suspend mode was selected. */
	if (!(bp->flags & BNX2_FLAG_NO_WOL))
		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);

}

3991
/* Move the device between PCI power states.  Only D0 (full power) and
 * D3hot (suspend, optionally armed for WoL) are supported; any other
 * state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Leave wake-packet detection off while fully powered. */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1 silicon only enters D3hot when WoL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
		} else {
			pci_set_power_state(bp->pdev, PCI_D3hot);
		}

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

/* Acquire the hardware arbitration lock for the NVRAM interface.
 * Polls up to NVRAM_TIMEOUT_COUNT times; returns 0 on success or
 * -EBUSY if the lock could not be obtained.
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

/* Release the NVRAM arbitration lock taken by
 * bnx2_acquire_nvram_lock().  Returns 0 on success or -EBUSY if the
 * hardware did not confirm the release in time.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}


/* Enable writes to the flash.  For parts that need it (BNX2_NV_WREN),
 * also issue a WREN command and wait for completion.  Returns 0 on
 * success or -EBUSY if the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		BNX2_WR(bp, BNX2_NVM_COMMAND,
			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

/* Disable flash writes by clearing the NVM write-enable bit. */
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 cfg = BNX2_RD(bp, BNX2_MISC_CFG);

	BNX2_WR(bp, BNX2_MISC_CFG, cfg & ~BNX2_MISC_CFG_NVM_WR_EN);
}


static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

4124
	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4125
	/* Enable both bits, even on read. */
4126 4127
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4128 4129 4130 4131 4132 4133 4134
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

4135
	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4136
	/* Disable both bits, even after read. */
4137
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4138 4139 4140 4141 4142 4143 4144 4145 4146 4147
		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
			BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

/* Erase one flash page at @offset.
 *
 * Buffered flash parts (BNX2_NV_BUFFERED) auto-erase and need no
 * explicit erase, so this is a no-op for them.  Otherwise an ERASE
 * command is issued and polled for DONE.
 *
 * Caller must hold the NVRAM lock with access and write enabled.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

/* Read one 32-bit word of NVRAM at @offset into @ret_val.
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transactions.  For BNX2_NV_TRANSLATE parts the linear
 * offset is converted to a page/byte address first.  The word read
 * from NVM_READ is stored big-endian into @ret_val.
 *
 * Caller must hold the NVRAM lock with access enabled.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}


/* Write one 32-bit word (@val, 4 bytes) of NVRAM at @offset.
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transactions.  For BNX2_NV_TRANSLATE parts the linear
 * offset is converted to a page/byte address first.  Data is written
 * big-endian to NVM_WRITE.
 *
 * Caller must hold the NVRAM lock with access and write enabled.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
M
Michael Chan 已提交
4275
	int j, entry_count, rc = 0;
4276
	const struct flash_spec *flash;
4277

4278
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
M
Michael Chan 已提交
4279 4280 4281 4282
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

4283
	/* Determine the selected interface. */
4284
	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4285

4286
	entry_count = ARRAY_SIZE(flash_table);
4287 4288 4289 4290 4291

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
4292 4293 4294
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4295 4296 4297 4298 4299 4300
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
4301
		u32 mask;
4302 4303
		/* Not yet been reconfigured */

4304 4305 4306 4307 4308
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

4309 4310 4311
		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

4312
			if ((val & mask) == (flash->strapping & mask)) {
4313 4314 4315 4316 4317 4318 4319 4320 4321 4322
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
4323 4324 4325 4326
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
4339
		pr_alert("Unknown flash/EEPROM type\n");
M
Michael Chan 已提交
4340
		return -ENODEV;
4341 4342
	}

M
Michael Chan 已提交
4343
get_flash_size:
4344
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
M
Michael Chan 已提交
4345 4346 4347 4348 4349 4350
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468
	return rc;
}

/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 *
 * Handles arbitrary (unaligned) offsets and lengths by reading whole
 * dwords and copying out only the requested bytes.  The whole transfer
 * runs under the NVRAM arbitration lock with access enabled.
 *
 * Fix vs. previous version: error paths now go through cleanup so the
 * NVRAM lock is released and access disabled even when a dword read
 * fails; previously those paths returned with the lock still held.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		/* Align down and read the dword covering the head bytes. */
		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			goto out;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		/* Round the length up; @extra bytes are discarded later. */
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			goto out;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

out:
	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}

/* Write @buf_size bytes from @data_buf to NVRAM starting at @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write style: the
 * surrounding dwords are read first and merged into a temporary
 * @align_buf.  The write then proceeds page by page; for non-buffered
 * flash each page is read into @flash_buffer, erased, and rewritten
 * with the merged data.  Each page is written under its own
 * acquire/release of the NVRAM lock.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, ...).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		/* Preserve the bytes preceding the write region. */
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		/* Preserve the bytes following the write region. */
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one flash page (max 264 bytes). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}

4644
static void
4645
bnx2_init_fw_cap(struct bnx2 *bp)
4646
{
4647
	u32 val, sig = 0;
4648

4649
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4650 4651 4652 4653
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4654

4655
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4656 4657 4658
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

4659 4660 4661 4662 4663 4664 4665 4666 4667
	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

4668
		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4669

4670 4671
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4672 4673 4674
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
4675

4676 4677
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4678
	}
4679 4680 4681

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4682 4683
}

4684 4685 4686
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
4687
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4688

4689 4690
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4691 4692
}

4693 4694 4695 4696 4697
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
4698
	u8 old_port;
4699 4700 4701

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
4702 4703
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4704 4705 4706 4707 4708 4709
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
E
Eddie Wai 已提交
4710 4711
		udelay(5);
	} else {  /* 5709 */
4712
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
E
Eddie Wai 已提交
4713
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4714 4715
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
E
Eddie Wai 已提交
4716 4717 4718

		for (i = 0; i < 100; i++) {
			msleep(1);
4719
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
E
Eddie Wai 已提交
4720 4721 4722 4723
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}
4724

4725
	/* Wait for the firmware to tell us it is ok to issue a reset. */
4726
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4727

4728 4729
	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
4730 4731
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4732 4733 4734

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
4735
	val = BNX2_RD(bp, BNX2_MISC_ID);
4736

4737
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4738 4739
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
4740
		udelay(5);
4741

4742 4743
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4744

4745
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4746

4747 4748 4749 4750 4751 4752
	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
4753
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4754

4755 4756 4757 4758
		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
4759 4760
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
A
Arjan van de Ven 已提交
4761
			msleep(20);
4762

4763 4764
		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
4765
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4766 4767 4768 4769 4770 4771 4772 4773
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4774
			pr_err("Chip reset did not complete\n");
4775 4776
			return -EBUSY;
		}
4777 4778 4779
	}

	/* Make sure byte swapping is properly configured. */
4780
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4781
	if (val != 0x01020304) {
4782
		pr_err("Chip not in correct endian mode\n");
4783 4784 4785 4786
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
4787
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4788 4789
	if (rc)
		return rc;
4790

4791
	spin_lock_bh(&bp->phy_lock);
4792
	old_port = bp->phy_port;
4793
	bnx2_init_fw_cap(bp);
4794 4795
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
4796 4797 4798
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

4799
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4800 4801
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
4802
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4803 4804 4805 4806 4807

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

4808
	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4809
		bnx2_setup_msix_tbl(bp);
4810
		/* Prevent MSIX table reads and write from timing out */
4811
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4812 4813
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}
4814

4815 4816 4817 4818 4819 4820
	return rc;
}

static int
bnx2_init_chip(struct bnx2 *bp)
{
4821
	u32 val, mtu;
4822
	int rc, i;
4823 4824

	/* Make sure the interrupt is not active. */
4825
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4826 4827 4828 4829

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
4830
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4831
#endif
4832
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4833 4834 4835 4836 4837
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

4838
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4839 4840
		val |= (1 << 23);

4841 4842 4843
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
4844 4845
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

4846
	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4847

4848
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4849
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4850
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4851
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4852 4853
	}

4854
	if (bp->flags & BNX2_FLAG_PCIX) {
4855 4856 4857 4858 4859 4860 4861 4862
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

4863 4864 4865 4866
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4867 4868 4869

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
4870
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4871 4872 4873 4874
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
M
Michael Chan 已提交
4875
		bnx2_init_context(bp);
4876

4877 4878 4879
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

4880 4881
	bnx2_init_nvram(bp);

4882
	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4883

4884
	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4885 4886
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4887
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4888
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4889
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4890 4891
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}
4892

4893
	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4894 4895

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4896 4897
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4898

4899
	val = (BNX2_PAGE_BITS - 8) << 24;
4900
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4901 4902

	/* Configure page size. */
4903
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4904
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4905
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4906
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4907 4908 4909 4910 4911 4912 4913

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
4914
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4915 4916

	/* Program the MTU.  Also include 4 bytes for CRC32. */
4917 4918
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4919 4920
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4921
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4922

4923 4924 4925 4926 4927 4928 4929
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

4930
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4931 4932 4933
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

4934 4935
	bp->idle_chk_status_idx = 0xffff;

4936 4937 4938
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
4939
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4940

4941 4942 4943
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4944

4945 4946 4947 4948
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);
4949

4950 4951
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4952

4953 4954
	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4955

4956 4957
	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4958

4959
	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4960

4961
	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4962

4963 4964
	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);
4965

4966 4967
	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4968

4969
	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4970
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4971
	else
4972 4973
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4974

4975
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4976
		val = BNX2_HC_CONFIG_COLLECT_STATS;
4977
	else {
4978 4979
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
4980 4981
	}

4982
	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4983 4984
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);
4985

M
Michael Chan 已提交
4986 4987 4988 4989
		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4990
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
M
Michael Chan 已提交
4991

4992
	BNX2_WR(bp, BNX2_HC_CONFIG, val);
M
Michael Chan 已提交
4993

M
Michael Chan 已提交
4994 4995 4996 4997 4998
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

M
Michael Chan 已提交
4999 5000 5001 5002
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

5003
		BNX2_WR(bp, base,
5004
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
M
Michael Chan 已提交
5005
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5006 5007
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

5008
		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5009 5010 5011
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

5012
		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5013 5014
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

5015 5016
		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
M
Michael Chan 已提交
5017
			bp->rx_quick_cons_trip);
5018

5019
		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
M
Michael Chan 已提交
5020 5021
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}
5022

5023
	/* Clear internal stats counters. */
5024
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5025

5026
	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5027 5028 5029 5030

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

5031
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5032
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
M
Michael Chan 已提交
5033
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5034
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
M
Michael Chan 已提交
5035
	}
5036
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5037
			  1, 0);
5038

5039 5040
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5041 5042 5043

	udelay(20);

5044
	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
M
Michael Chan 已提交
5045

5046
	return rc;
5047 5048
}

5049 5050 5051 5052
static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
5053
	struct bnx2_tx_ring_info *txr;
5054
	struct bnx2_rx_ring_info *rxr;
5055 5056 5057 5058
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
5059
		txr = &bnapi->tx_ring;
5060
		rxr = &bnapi->rx_ring;
5061

5062 5063
		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
5064 5065 5066 5067 5068
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
5069 5070 5071
	}
}

M
Michael Chan 已提交
5072
static void
5073
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
M
Michael Chan 已提交
5074 5075
{
	u32 val, offset0, offset1, offset2, offset3;
M
Michael Chan 已提交
5076
	u32 cid_addr = GET_CID_ADDR(cid);
M
Michael Chan 已提交
5077

5078
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
M
Michael Chan 已提交
5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
M
Michael Chan 已提交
5090
	bnx2_ctx_wr(bp, cid_addr, offset0, val);
M
Michael Chan 已提交
5091 5092

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
M
Michael Chan 已提交
5093
	bnx2_ctx_wr(bp, cid_addr, offset1, val);
M
Michael Chan 已提交
5094

5095
	val = (u64) txr->tx_desc_mapping >> 32;
M
Michael Chan 已提交
5096
	bnx2_ctx_wr(bp, cid_addr, offset2, val);
M
Michael Chan 已提交
5097

5098
	val = (u64) txr->tx_desc_mapping & 0xffffffff;
M
Michael Chan 已提交
5099
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
M
Michael Chan 已提交
5100
}
5101 5102

static void
5103
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5104
{
5105
	struct bnx2_tx_bd *txbd;
5106 5107
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
5108
	struct bnx2_tx_ring_info *txr;
5109

5110 5111 5112 5113 5114 5115 5116
	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;
5117

M
Michael Chan 已提交
5118 5119
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

5120
	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5121

5122 5123
	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5124

5125 5126
	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;
5127

5128 5129
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5130

5131
	bnx2_init_tx_context(bp, cid, txr);
5132 5133 5134
}

static void
5135 5136
bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
		     u32 buf_size, int num_rings)
5137 5138
{
	int i;
5139
	struct bnx2_rx_bd *rxbd;
5140

5141
	for (i = 0; i < num_rings; i++) {
5142
		int j;
5143

5144
		rxbd = &rx_ring[i][0];
5145
		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5146
			rxbd->rx_bd_len = buf_size;
5147 5148
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
5149
		if (i == (num_rings - 1))
5150 5151 5152
			j = 0;
		else
			j = i + 1;
5153 5154
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5155
	}
5156 5157 5158
}

/* Initialize RX ring @ring_num: program the L2 context with the BD and
 * (optionally) page-BD chain addresses, pre-fill the ring with receive
 * buffers, and publish the initial producer indices to the chip.
 * Ring 0 uses RX_CID; RSS rings use consecutive CIDs from RX_RSS_CID.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709-specific MQ mapping arm bit. */
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page-buffer size 0 disables the jumbo page ring by default. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Tell the chip where the first BD page of the normal ring lives. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a warning. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal data ring, likewise best-effort. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish initial producer indices/sequence to the hardware. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}

5244 5245 5246 5247
/* Initialize every TX and RX ring and, when more than one ring of a
 * kind is in use, program the TSS configuration and the RSS
 * indirection table in RX processor scratch memory.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while the rings and table are (re)built. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the indirection table 8 nibble-entries (one 32-bit
		 * word) at a time; entries round-robin over the RSS rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		/* Re-enable RSS hashing now that the table is programmed. */
		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}

5291
/* Return the number of BD pages needed to hold @ring_size descriptors,
 * rounded up to a power of two.  @max_size is expected to be the
 * power-of-two upper bound the caller allows (BNX2_MAX_RX_RINGS /
 * BNX2_MAX_RX_PG_RINGS); callers cap @ring_size so the result does not
 * exceed it.
 */
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 pages = 1;
	u32 pow2;

	/* Count the descriptor pages spanned by the requested size. */
	for (; ring_size > BNX2_MAX_RX_DESC_CNT; ring_size -= BNX2_MAX_RX_DESC_CNT)
		pages++;

	/* Shift max_size down to the highest set bit of pages, then go one
	 * power of two up unless pages was already an exact power of two.
	 */
	pow2 = max_size;
	while (!(pow2 & pages))
		pow2 >>= 1;

	if (pow2 != pages)
		pow2 <<= 1;

	return pow2;
}

/* Compute all RX buffer/ring sizing parameters for the requested ring
 * @size and the current MTU.  When a single buffer would exceed a page
 * (jumbo MTU) and the chip supports it, frames are split: a small
 * header buffer plus a chain of page buffers.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb-data footprint: aligned payload + build_skb overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame beyond the header buffer.
		 * NOTE(review): the "- 40" appears to discount headers kept
		 * in the first buffer — confirm against the header split
		 * logic before changing.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* Header buffer shrinks to the copy threshold in jumbo mode. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}

5351 5352 5353 5354 5355
/* Unmap and free every skb still held by the TX rings, then reset the
 * BQL state of each TX queue.  Used on teardown/reset paths.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body: one step for the skb head
		 * plus one step per fragment BD.
		 */
		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Head BD maps the linear part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Each fragment occupies its own BD after the head. */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

/* Unmap and free all RX data buffers and RX pages still held by the
 * RX rings.  Used on teardown/reset paths.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): returns (not continues) on the first ring
		 * without a buffer ring — presumably rings are allocated in
		 * order so later rings cannot be populated either; confirm
		 * against the allocation path before changing.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (data == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}

/* Release every TX and RX buffer held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

/* Reset the chip with @reset_code, drop all outstanding buffers, then
 * reinitialize the chip and all rings.  Returns 0 on success or a
 * negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int err = bnx2_reset_chip(bp, reset_code);

	/* Buffers are released even when the reset itself failed. */
	bnx2_free_skbs(bp);
	if (err)
		return err;

	err = bnx2_init_chip(bp);
	if (err)
		return err;

	bnx2_init_all_rings(bp);
	return 0;
}

static int
5455
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5456 5457 5458 5459 5460 5461
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

M
Michael Chan 已提交
5462
	spin_lock_bh(&bp->phy_lock);
5463
	bnx2_init_phy(bp, reset_phy);
5464
	bnx2_set_link(bp);
5465 5466
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
5467
	spin_unlock_bh(&bp->phy_lock);
5468 5469 5470
	return 0;
}

M
Michael Chan 已提交
5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485
/* Put the chip into its shutdown state, choosing the firmware reset
 * code from the Wake-on-LAN configuration.  Returns the result of
 * bnx2_reset_chip().
 */
static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		code = bp->wol ? BNX2_DRV_MSG_CODE_SUSPEND_WOL :
				 BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, code);
}

5486 5487 5488 5489
/* Ethtool self-test: for each register in the table, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success,
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zero: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-one: rw bits must read back 1, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}

/* Walk [start, start + size) in 4-byte steps for each test pattern,
 * writing the pattern through the indirect register interface and
 * reading it back.  Returns 0 on success, -ENODEV on the first
 * mismatch.
 */
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 patterns[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int p;

	for (p = 0; p < sizeof(patterns) / sizeof(patterns[0]); p++) {
		u32 off;

		for (off = 0; off < size; off += 4) {
			u32 want = patterns[p];

			bnx2_reg_wr_ind(bp, start + off, want);
			if (bnx2_reg_rd_ind(bp, start + off) != want)
				return -ENODEV;
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
5685
	static struct mem_entry {
5686 5687
		u32   offset;
		u32   len;
5688
	} mem_tbl_5706[] = {
5689
		{ 0x60000,  0x4000 },
M
Michael Chan 已提交
5690
		{ 0xa0000,  0x3000 },
5691 5692 5693 5694 5695
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
5696 5697 5698 5699 5700 5701 5702 5703
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
5704
	};
5705 5706
	struct mem_entry *mem_tbl;

5707
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5708 5709 5710
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;
5711 5712 5713 5714 5715 5716 5717

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}
5718

5719 5720 5721
	return ret;
}

M
Michael Chan 已提交
5722 5723 5724
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

5725
/* Ethtool loopback self-test.  Configure MAC or PHY loopback, transmit
 * one self-addressed test frame on TX ring 0, then verify it comes
 * back on RX ring 0 with no error flags, the expected length, and an
 * intact payload.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM/-EIO on allocation or DMA mapping failure, and -ENODEV when
 * the frame is not received correctly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Only ring 0 participates in the loopback test. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Remote-PHY configurations cannot run PHY loopback. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo threshold so it arrives in one
	 * buffer (minus 4 bytes for the CRC).
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination = our own MAC; payload bytes follow i & 0xff. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce pass (no interrupt) to snapshot the RX consumer. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand one BD describing the whole frame to the chip. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second coalesce pass so the completed TX/RX indices are updated. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Any receive-error flag fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Payload must match the i & 0xff fill pattern. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}

M
Michael Chan 已提交
5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

/* Run both MAC and PHY loopback self-tests after a fresh chip and PHY
 * reset.  Returns a bitmask of the *_FAILED flags (0 = all passed);
 * both bits are reported when the interface is not running.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);	/* 1 = force a PHY reset before testing */
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}

5882 5883 5884 5885 5886 5887
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
A
Al Viro 已提交
5888
	__be32 buf[NVRAM_SIZE / 4];
5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

        magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}

/* Ethtool link self-test.  With a remote PHY, just report the cached
 * link state; otherwise read BMSR1 through the PHY and check LSTATUS.
 * Returns 0 when link is up, -ENODEV otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice: BMSR latches link-down, the second read is current. */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

/* Ethtool interrupt self-test: force a coalesce event and poll up to
 * ~100 ms for the status-block index in PCICFG_INT_ACK_CMD to change,
 * which proves the interrupt path updated the status block.  Returns 0
 * when the index changed, -ENODEV on timeout or when not running.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	BNX2_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}

5976
/* Determining link for parallel detection. */
5977 5978 5979 5980 5981
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Signal-detect must be asserted for any link at all. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice to get the current (unlatched) AN debug state. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}

6008
/* Periodic (timer-context) SerDes state machine for the 5706.
 * Implements parallel detection: when autoneg has not produced a link
 * but the partner shows a valid signal, force 1000/full; when a forced
 * link later sees the partner advertising autoneg, re-enable autoneg.
 * Also force-drops a link that has lost sync.  Runs under phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg grace period still running; skip the link check. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg got nowhere but the wire looks alive:
			 * parallel-detect a forced 1000/full link.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Partner now advertises autoneg: drop the forced mode. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read AN debug twice for the unlatched state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but no sync: force it down once,
			 * then let bnx2_set_link() re-evaluate next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6069

6070 6071 6072
/* Periodic SerDes handling for the 5708: when autoneg fails to bring
 * the link up on a 2.5G-capable part, alternate between forcing 2.5G
 * and re-enabling autoneg until the link comes up.  No-op with a
 * remote PHY or without 2.5G capability.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg idle with no link: try forced 2.5G for a
			 * shortened timeout.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G did not link either: back to autoneg
			 * with a two-tick grace period.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}

6103 6104 6105 6106
/* Periodic driver timer: firmware heartbeat, firmware drop-counter
 * pickup, broken-stats workaround, missed-MSI check, and the per-chip
 * SerDes state machines.  Re-arms itself with bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts disabled (reset in progress): just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (not one-shot) can miss edges; check for that. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

6139 6140 6141
static int
bnx2_request_irq(struct bnx2 *bp)
{
6142
	unsigned long flags;
6143 6144
	struct bnx2_irq *irq;
	int rc = 0, i;
6145

6146
	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6147 6148 6149
		flags = 0;
	else
		flags = IRQF_SHARED;
6150 6151 6152

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
6153
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6154
				 &bp->bnx2_napi[i]);
6155 6156 6157 6158
		if (rc)
			break;
		irq->requested = 1;
	}
6159 6160 6161 6162
	return rc;
}

static void
6163
__bnx2_free_irq(struct bnx2 *bp)
6164
{
6165 6166
	struct bnx2_irq *irq;
	int i;
6167

6168 6169 6170
	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
6171
			free_irq(irq->vector, &bp->bnx2_napi[i]);
6172
		irq->requested = 0;
6173
	}
6174 6175 6176 6177 6178 6179 6180
}

/* Free all IRQs and tear down MSI/MSI-X, clearing the related flags. */
static void
bnx2_free_irq(struct bnx2 *bp)
{

	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}

/* Try to switch the device to MSI-X with @msix_vecs vectors (plus one
 * extra for CNIC when built in).  On success, records the granted
 * vectors in irq_tbl and sets the USING_MSIX / ONE_SHOT_MSI flags; on
 * failure returns silently and the caller falls back to MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	/* Retry with the count the PCI core says is available.
	 * NOTE(review): pci_enable_msix() with this retry loop was later
	 * replaced kernel-wide by pci_enable_msix_range(); consider
	 * converting when rebasing onto a newer kernel.
	 */
	rc = -ENOSPC;
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;	/* last vector is reserved for CNIC */
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}

6240
static int
6241 6242
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
6243
	int cpus = netif_get_num_default_rss_queues();
6244 6245 6246 6247 6248 6249 6250 6251 6252 6253
	int msix_vecs;

	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
M
Michael Chan 已提交
6254

6255 6256
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6257 6258 6259
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

6260
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
M
Michael Chan 已提交
6261
		bnx2_enable_msix(bp, msix_vecs);
6262

6263 6264
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6265
		if (pci_enable_msi(bp->pdev) == 0) {
6266
			bp->flags |= BNX2_FLAG_USING_MSI;
6267
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6268
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6269 6270 6271
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;
6272 6273

			bp->irq_tbl[0].vector = bp->pdev->irq;
6274 6275
		}
	}
B
Benjamin Li 已提交
6276

6277 6278 6279 6280 6281 6282 6283 6284 6285 6286
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

6287
	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
B
Benjamin Li 已提交
6288

6289
	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6290 6291
}

6292 6293 6294 6295
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
M
Michael Chan 已提交
6296
	struct bnx2 *bp = netdev_priv(dev);
6297 6298
	int rc;

6299 6300 6301 6302
	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

6303 6304
	netif_carrier_off(dev);

6305 6306
	bnx2_disable_int(bp);

6307 6308 6309
	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
B
Benjamin Li 已提交
6310
	bnx2_init_napi(bp);
6311
	bnx2_napi_enable(bp);
6312
	rc = bnx2_alloc_mem(bp);
6313 6314
	if (rc)
		goto open_err;
6315

6316
	rc = bnx2_request_irq(bp);
6317 6318
	if (rc)
		goto open_err;
6319

6320
	rc = bnx2_init_nic(bp, 1);
6321 6322
	if (rc)
		goto open_err;
6323

M
Michael Chan 已提交
6324
	mod_timer(&bp->timer, jiffies + bp->current_interval);
6325 6326 6327

	atomic_set(&bp->intr_sem, 0);

6328 6329
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

6330 6331
	bnx2_enable_int(bp);

6332
	if (bp->flags & BNX2_FLAG_USING_MSI) {
6333 6334 6335 6336
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
6337
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6338 6339

			bnx2_disable_int(bp);
6340
			bnx2_free_irq(bp);
6341

6342 6343
			bnx2_setup_int_mode(bp, 1);

6344
			rc = bnx2_init_nic(bp, 0);
6345

6346 6347 6348
			if (!rc)
				rc = bnx2_request_irq(bp);

6349 6350
			if (rc) {
				del_timer_sync(&bp->timer);
6351
				goto open_err;
6352 6353 6354 6355
			}
			bnx2_enable_int(bp);
		}
	}
6356
	if (bp->flags & BNX2_FLAG_USING_MSI)
6357
		netdev_info(dev, "using MSI\n");
6358
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6359
		netdev_info(dev, "using MSIX\n");
6360

B
Benjamin Li 已提交
6361
	netif_tx_start_all_queues(dev);
6362 6363
out:
	return rc;
6364 6365 6366 6367 6368 6369

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
M
Michael Chan 已提交
6370
	bnx2_del_napi(bp);
6371 6372
	bnx2_release_firmware(bp);
	goto out;
6373 6374 6375
}

static void
D
David Howells 已提交
6376
bnx2_reset_task(struct work_struct *work)
6377
{
D
David Howells 已提交
6378
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6379
	int rc;
6380
	u16 pcicmd;
6381

6382 6383 6384
	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
6385
		return;
6386
	}
6387

6388
	bnx2_netif_stop(bp, true);
6389

6390 6391 6392 6393 6394 6395
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
6396 6397 6398 6399 6400 6401 6402 6403
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}
6404 6405

	atomic_set(&bp->intr_sem, 1);
6406
	bnx2_netif_start(bp, true);
6407
	rtnl_unlock();
6408 6409
}

6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

/* Dump the flow-through queue control registers, on-chip CPU states and
 * the TBDC CAM to the kernel log for tx-timeout diagnosis.
 * NOTE(review): TXP_ appears twice in ftq_arr and the CPU-state line
 * reads the PC register (offset 0x1c) twice — both match the upstream
 * driver (two PC samples show whether the CPU is advancing).
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		/* bounded poll for the CAM read to complete */
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}

6479 6480 6481 6482
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
J
Jeffrey Huang 已提交
6483
	u32 val1, val2;
6484 6485 6486 6487 6488 6489 6490

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6491
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6492 6493
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6494
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6495
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6496
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6497
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6498
	if (bp->flags & BNX2_FLAG_USING_MSIX)
6499
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6500
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6501 6502
}

6503 6504 6505
static void
bnx2_tx_timeout(struct net_device *dev)
{
M
Michael Chan 已提交
6506
	struct bnx2 *bp = netdev_priv(dev);
6507

6508
	bnx2_dump_ftq(bp);
6509
	bnx2_dump_state(bp);
J
Jeffrey Huang 已提交
6510
	bnx2_dump_mcp_state(bp);
6511

6512 6513 6514 6515
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

H
Herbert Xu 已提交
6516
/* Called with netif_tx_lock.
M
Michael Chan 已提交
6517 6518
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
6519
 */
6520
static netdev_tx_t
6521 6522
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
M
Michael Chan 已提交
6523
	struct bnx2 *bp = netdev_priv(dev);
6524
	dma_addr_t mapping;
6525 6526
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
6527 6528 6529
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
B
Benjamin Li 已提交
6530 6531 6532 6533 6534 6535 6536 6537 6538
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);
6539

6540
	if (unlikely(bnx2_tx_avail(bp, txr) <
6541
	    (skb_shinfo(skb)->nr_frags + 1))) {
B
Benjamin Li 已提交
6542
		netif_tx_stop_queue(txq);
6543
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6544 6545 6546 6547

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
6548
	prod = txr->tx_prod;
6549
	ring_prod = BNX2_TX_RING_IDX(prod);
6550 6551

	vlan_tag_flags = 0;
6552
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6553 6554 6555
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

6556
	if (vlan_tx_tag_present(skb)) {
6557 6558 6559
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
6560

6561
	if ((mss = skb_shinfo(skb)->gso_size)) {
6562
		u32 tcp_opt_len;
6563
		struct iphdr *iph;
6564 6565 6566

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

6567 6568 6569 6570 6571
		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;
6572

6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590
			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
6591
		}
6592
	} else
6593 6594
		mss = 0;

6595 6596
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
B
Benjamin Li 已提交
6597 6598 6599 6600
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

6601
	tx_buf = &txr->tx_buf_ring[ring_prod];
6602
	tx_buf->skb = skb;
6603
	dma_unmap_addr_set(tx_buf, mapping, mapping);
6604

6605
	txbd = &txr->tx_desc_ring[ring_prod];
6606 6607 6608 6609 6610 6611 6612

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
E
Eric Dumazet 已提交
6613 6614
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);
6615 6616

	for (i = 0; i < last_frag; i++) {
E
Eric Dumazet 已提交
6617
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6618

6619 6620
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
6621
		txbd = &txr->tx_desc_ring[ring_prod];
6622

E
Eric Dumazet 已提交
6623
		len = skb_frag_size(frag);
6624
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6625
					   DMA_TO_DEVICE);
6626
		if (dma_mapping_error(&bp->pdev->dev, mapping))
6627
			goto dma_error;
6628
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6629
				   mapping);
6630 6631 6632 6633 6634 6635 6636 6637 6638

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

6639 6640 6641
	/* Sync BD data before updating TX mailbox */
	wmb();

E
Eric Dumazet 已提交
6642 6643
	netdev_tx_sent_queue(txq, skb->len);

6644
	prod = BNX2_NEXT_TX_BD(prod);
6645
	txr->tx_prod_bseq += skb->len;
6646

6647 6648
	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6649 6650 6651

	mmiowb();

6652
	txr->tx_prod = prod;
6653

6654
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
B
Benjamin Li 已提交
6655
		netif_tx_stop_queue(txq);
6656 6657 6658 6659 6660 6661 6662

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
6663
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
B
Benjamin Li 已提交
6664
			netif_tx_wake_queue(txq);
6665 6666
	}

6667 6668 6669 6670 6671 6672 6673
	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
6674
	ring_prod = BNX2_TX_RING_IDX(prod);
6675 6676
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
6677
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6678 6679 6680 6681
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
6682 6683
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
6684
		tx_buf = &txr->tx_buf_ring[ring_prod];
6685
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
E
Eric Dumazet 已提交
6686
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6687 6688 6689 6690
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
6691 6692 6693 6694 6695 6696 6697
	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
M
Michael Chan 已提交
6698
	struct bnx2 *bp = netdev_priv(dev);
6699

6700
	bnx2_disable_int_sync(bp);
6701
	bnx2_napi_disable(bp);
6702
	netif_tx_disable(dev);
6703
	del_timer_sync(&bp->timer);
M
Michael Chan 已提交
6704
	bnx2_shutdown_chip(bp);
6705
	bnx2_free_irq(bp);
6706 6707
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
M
Michael Chan 已提交
6708
	bnx2_del_napi(bp);
6709 6710 6711 6712 6713
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}

6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

6726 6727
		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6728 6729
		if (lo > 0xffffffff)
			hi++;
6730 6731
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
6732 6733 6734
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6735
		temp_stats[i] += hw_stats[i];
6736 6737
}

E
Eric Dumazet 已提交
6738 6739
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6740

M
Michael Chan 已提交
6741
#define GET_64BIT_NET_STATS(ctr)				\
6742 6743
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6744

M
Michael Chan 已提交
6745
#define GET_32BIT_NET_STATS(ctr)				\
6746 6747
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
M
Michael Chan 已提交
6748

E
Eric Dumazet 已提交
6749 6750
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6751
{
M
Michael Chan 已提交
6752
	struct bnx2 *bp = netdev_priv(dev);
6753

E
Eric Dumazet 已提交
6754
	if (bp->stats_blk == NULL)
6755
		return net_stats;
E
Eric Dumazet 已提交
6756

6757
	net_stats->rx_packets =
M
Michael Chan 已提交
6758 6759 6760
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6761 6762

	net_stats->tx_packets =
M
Michael Chan 已提交
6763 6764 6765
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6766 6767

	net_stats->rx_bytes =
M
Michael Chan 已提交
6768
		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6769 6770

	net_stats->tx_bytes =
M
Michael Chan 已提交
6771
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6772

6773
	net_stats->multicast =
6774
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6775

6776
	net_stats->collisions =
M
Michael Chan 已提交
6777
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6778

6779
	net_stats->rx_length_errors =
M
Michael Chan 已提交
6780 6781
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6782

6783
	net_stats->rx_over_errors =
M
Michael Chan 已提交
6784 6785
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6786

6787
	net_stats->rx_frame_errors =
M
Michael Chan 已提交
6788
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6789

6790
	net_stats->rx_crc_errors =
M
Michael Chan 已提交
6791
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6792 6793 6794 6795 6796 6797

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
M
Michael Chan 已提交
6798 6799
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6800

6801 6802
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6803 6804 6805
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
M
Michael Chan 已提交
6806
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6807 6808 6809
	}

	net_stats->tx_errors =
M
Michael Chan 已提交
6810
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6811 6812 6813
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

M
Michael Chan 已提交
6814
	net_stats->rx_missed_errors =
M
Michael Chan 已提交
6815 6816 6817
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);
M
Michael Chan 已提交
6818

6819 6820 6821 6822 6823 6824 6825 6826
	return net_stats;
}

/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
M
Michael Chan 已提交
6827
	struct bnx2 *bp = netdev_priv(dev);
6828
	int support_serdes = 0, support_copper = 0;
6829 6830

	cmd->supported = SUPPORTED_Autoneg;
6831
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6832 6833 6834 6835 6836 6837 6838 6839
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
6840 6841
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
6842
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6843
			cmd->supported |= SUPPORTED_2500baseX_Full;
6844 6845

	}
6846
	if (support_copper) {
6847 6848 6849 6850 6851 6852 6853 6854 6855
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

6856 6857
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
6858 6859 6860 6861
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
6862
	} else {
6863 6864 6865 6866
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
6867
		ethtool_cmd_speed_set(cmd, bp->line_speed);
6868 6869 6870
		cmd->duplex = bp->duplex;
	}
	else {
6871
		ethtool_cmd_speed_set(cmd, -1);
6872 6873
		cmd->duplex = -1;
	}
6874
	spin_unlock_bh(&bp->phy_lock);
6875 6876 6877 6878 6879 6880

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6881

6882 6883 6884
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
M
Michael Chan 已提交
6885
	struct bnx2 *bp = netdev_priv(dev);
6886 6887 6888 6889
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
6890 6891 6892 6893 6894 6895 6896
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

6897 6898
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6899
		goto err_out_unlock;
6900

6901 6902 6903 6904 6905 6906
	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

6907 6908 6909
	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

6910 6911 6912 6913
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
6914
				advertising = ETHTOOL_ALL_COPPER_SPEED;
6915 6916 6917 6918
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6919 6920 6921 6922
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
6923
		u32 speed = ethtool_cmd_speed(cmd);
6924
		if (cmd->port == PORT_FIBRE) {
6925 6926
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
M
Michael Chan 已提交
6927
			    (cmd->duplex != DUPLEX_FULL))
6928
				goto err_out_unlock;
M
Michael Chan 已提交
6929

6930
			if (speed == SPEED_2500 &&
6931
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6932
				goto err_out_unlock;
6933
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6934 6935
			goto err_out_unlock;

6936
		autoneg &= ~AUTONEG_SPEED;
6937
		req_line_speed = speed;
6938 6939 6940 6941 6942 6943 6944 6945 6946
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

6947 6948 6949 6950 6951 6952
	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);
6953

6954
err_out_unlock:
6955
	spin_unlock_bh(&bp->phy_lock);
6956

6957
	return err;
6958 6959 6960 6961 6962
}

static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
M
Michael Chan 已提交
6963
	struct bnx2 *bp = netdev_priv(dev);
6964

6965 6966 6967 6968
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6969 6970
}

M
Michael Chan 已提交
6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984
/* Size of the ethtool register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
J
Joe Perches 已提交
6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};
M
Michael Chan 已提交
7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
7021
		*p++ = BNX2_RD(bp, offset);
M
Michael Chan 已提交
7022 7023 7024 7025 7026 7027 7028 7029 7030
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

7031 7032 7033
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
M
Michael Chan 已提交
7034
	struct bnx2 *bp = netdev_priv(dev);
7035

7036
	if (bp->flags & BNX2_FLAG_NO_WOL) {
7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
M
Michael Chan 已提交
7053
	struct bnx2 *bp = netdev_priv(dev);
7054 7055 7056 7057 7058

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
7059
		if (bp->flags & BNX2_FLAG_NO_WOL)
7060 7061 7062 7063 7064 7065 7066
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
7067 7068 7069

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

7070 7071 7072 7073 7074 7075
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
M
Michael Chan 已提交
7076
	struct bnx2 *bp = netdev_priv(dev);
7077 7078
	u32 bmcr;

7079 7080 7081
	if (!netif_running(dev))
		return -EAGAIN;

7082 7083 7084 7085
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

7086
	spin_lock_bh(&bp->phy_lock);
7087

7088
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7089 7090 7091 7092 7093 7094 7095
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

7096
	/* Force a link down visible on the other side */
7097
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7098
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7099
		spin_unlock_bh(&bp->phy_lock);
7100 7101 7102

		msleep(20);

7103
		spin_lock_bh(&bp->phy_lock);
7104

M
Michael Chan 已提交
7105
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7106 7107
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
7108 7109
	}

7110
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7111
	bmcr &= ~BMCR_LOOPBACK;
7112
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7113

7114
	spin_unlock_bh(&bp->phy_lock);
7115 7116 7117 7118

	return 0;
}

7119 7120 7121 7122 7123 7124 7125 7126
/* ethtool get_link: report the cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

7127 7128 7129
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
M
Michael Chan 已提交
7130
	struct bnx2 *bp = netdev_priv(dev);
7131

M
Michael Chan 已提交
7132
	if (bp->flash_info == NULL)
7133 7134
		return 0;

M
Michael Chan 已提交
7135
	return (int) bp->flash_size;
7136 7137 7138 7139 7140 7141
}

/* ethtool get_eeprom: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @eebuf.
 */
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

/* ethtool set_eeprom: write @eeprom->len bytes from @eebuf to NVRAM at
 * @eeprom->offset.
 */
static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
M
Michael Chan 已提交
7169
	struct bnx2 *bp = netdev_priv(dev);
7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
M
Michael Chan 已提交
7191
	struct bnx2 *bp = netdev_priv(dev);
7192 7193 7194 7195

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

7196
	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7220
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7221 7222 7223
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
7224 7225 7226
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7227 7228

	if (netif_running(bp->dev)) {
7229
		bnx2_netif_stop(bp, true);
7230
		bnx2_init_nic(bp, 0);
7231
		bnx2_netif_start(bp, true);
7232 7233 7234 7235 7236 7237 7238 7239
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
M
Michael Chan 已提交
7240
	struct bnx2 *bp = netdev_priv(dev);
7241

7242 7243
	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7244 7245

	ering->rx_pending = bp->rx_ring_size;
7246
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7247

7248
	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7249 7250 7251 7252
	ering->tx_pending = bp->tx_ring_size;
}

static int
7253
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7254
{
7255
	if (netif_running(bp->dev)) {
7256 7257 7258
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

7259
		bnx2_netif_stop(bp, true);
7260
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7261 7262 7263 7264 7265 7266
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
7267 7268 7269 7270
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

7271 7272
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;
7273 7274

	if (netif_running(bp->dev)) {
7275 7276 7277 7278 7279 7280 7281 7282 7283
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);
7284

7285 7286 7287
		if (!rc)
			rc = bnx2_request_irq(bp);

7288 7289 7290 7291 7292 7293
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
7294
			return rc;
7295
		}
7296 7297 7298 7299 7300 7301 7302
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
7303
		bnx2_netif_start(bp, true);
7304 7305 7306 7307
	}
	return 0;
}

7308 7309 7310 7311 7312 7313
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

7314 7315
	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7316 7317 7318 7319
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
7320 7321
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				   false);
7322 7323 7324
	return rc;
}

7325 7326 7327
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
M
Michael Chan 已提交
7328
	struct bnx2 *bp = netdev_priv(dev);
7329 7330 7331 7332 7333 7334 7335 7336 7337

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
M
Michael Chan 已提交
7338
	struct bnx2 *bp = netdev_priv(dev);
7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

7353 7354 7355 7356 7357
	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}
7358 7359 7360 7361

	return 0;
}

7362
static struct {
7363
	char string[ETH_GSTRING_LEN];
M
Michael Chan 已提交
7364
} bnx2_stats_str_arr[] = {
7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
M
Michael Chan 已提交
7409
	{ "rx_ftq_discards" },
7410
	{ "rx_discards" },
M
Michael Chan 已提交
7411
	{ "rx_fw_discards" },
7412 7413
};

#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* u32-word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

7418
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
M
Michael Chan 已提交
7463
    STATS_OFFSET32(stat_IfInFTQDiscards),
7464
    STATS_OFFSET32(stat_IfInMBUFDiscards),
M
Michael Chan 已提交
7465
    STATS_OFFSET32(stat_FwRxDrop),
7466 7467 7468 7469
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
7470
 */
7471
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7472 7473 7474 7475
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
M
Michael Chan 已提交
7476
	4,4,4,4,4,4,4,
7477 7478
};

M
Michael Chan 已提交
7479 7480 7481 7482 7483
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
M
Michael Chan 已提交
7484
	4,4,4,4,4,4,4,
M
Michael Chan 已提交
7485 7486
};

7487 7488
#define BNX2_NUM_TESTS 6

7489
static struct {
7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
7501
bnx2_get_sset_count(struct net_device *dev, int sset)
7502
{
7503 7504 7505 7506 7507 7508 7509 7510
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
7511 7512 7513 7514 7515
}

/* ethtool self-test handler.  Offline tests reset the chip and run
 * register/memory/loopback diagnostics; online tests (NVRAM, interrupt,
 * link) always run.  A non-zero buf[] entry marks a failed test.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}

/* Copy out the name table for the requested ethtool string set. */
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

/* ethtool -S handler: fold the live hardware stats block and the
 * saved pre-reset snapshot (temp_stats_blk) into 64-bit counters.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early revisions skip some counters because of errata. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: hi word first, then lo word. */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

static int
S
stephen hemminger 已提交
7631
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7632
{
M
Michael Chan 已提交
7633
	struct bnx2 *bp = netdev_priv(dev);
7634

S
stephen hemminger 已提交
7635 7636
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
7637 7638
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7639
		return 1;	/* cycle on/off once per second */
7640

S
stephen hemminger 已提交
7641
	case ETHTOOL_ID_ON:
7642 7643 7644 7645 7646 7647
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
S
stephen hemminger 已提交
7648
		break;
7649

S
stephen hemminger 已提交
7650
	case ETHTOOL_ID_OFF:
7651
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
S
stephen hemminger 已提交
7652
		break;
7653

S
stephen hemminger 已提交
7654
	case ETHTOOL_ID_INACTIVE:
7655 7656
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
S
stephen hemminger 已提交
7657 7658
		break;
	}
7659

7660 7661 7662
	return 0;
}

7663 7664
static netdev_features_t
bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7665 7666 7667
{
	struct bnx2 *bp = netdev_priv(dev);

7668
	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7669
		features |= NETIF_F_HW_VLAN_CTAG_RX;
7670 7671

	return features;
7672 7673
}

7674
static int
7675
bnx2_set_features(struct net_device *dev, netdev_features_t features)
7676
{
7677 7678
	struct bnx2 *bp = netdev_priv(dev);

M
Michael Chan 已提交
7679
	/* TSO with VLAN tag won't work with current firmware */
7680
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7681 7682 7683
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7684

7685
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7686 7687 7688
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
7689
		dev->features = features;
7690 7691 7692
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
7693
		return 1;
7694 7695 7696
	}

	return 0;
7697 7698
}

7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746
static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}

/* ethtool -L handler: validate requested ring counts and, if the
 * device is up, rebuild the rings with the new configuration.
 */
static int bnx2_set_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_max = 1;
	u32 tx_max = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		rx_max = RX_MAX_RINGS;
		tx_max = TX_MAX_RINGS;
	}
	if (channels->rx_count > rx_max ||
	    channels->tx_count > tx_max)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}

7747
static const struct ethtool_ops bnx2_ethtool_ops = {
7748 7749 7750
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
M
Michael Chan 已提交
7751 7752
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
7753 7754 7755
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
7756
	.get_link		= bnx2_get_link,
7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
S
stephen hemminger 已提交
7768
	.set_phys_id		= bnx2_set_phys_id,
7769
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7770
	.get_sset_count		= bnx2_get_sset_count,
7771 7772
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
7773 7774 7775 7776 7777 7778
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
7779
	struct mii_ioctl_data *data = if_mii(ifr);
M
Michael Chan 已提交
7780
	struct bnx2 *bp = netdev_priv(dev);
7781 7782 7783 7784 7785 7786 7787 7788 7789 7790
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

7791
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7792 7793
			return -EOPNOTSUPP;

7794 7795 7796
		if (!netif_running(dev))
			return -EAGAIN;

7797
		spin_lock_bh(&bp->phy_lock);
7798
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7799
		spin_unlock_bh(&bp->phy_lock);
7800 7801 7802 7803 7804 7805 7806

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
7807
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7808 7809
			return -EOPNOTSUPP;

7810 7811 7812
		if (!netif_running(dev))
			return -EAGAIN;

7813
		spin_lock_bh(&bp->phy_lock);
7814
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7815
		spin_unlock_bh(&bp->phy_lock);
7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
M
Michael Chan 已提交
7831
	struct bnx2 *bp = netdev_priv(dev);
7832

7833
	if (!is_valid_ether_addr(addr->sa_data))
7834
		return -EADDRNOTAVAIL;
7835

7836 7837
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
7838
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7839 7840 7841 7842 7843 7844 7845 7846

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
M
Michael Chan 已提交
7847
	struct bnx2 *bp = netdev_priv(dev);
7848 7849 7850 7851 7852 7853

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
7854 7855
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
7856 7857
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run each vector's interrupt handler with its
 * IRQ temporarily disabled so it cannot race the real interrupt.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif

B
Bill Pemberton 已提交
7875
static void
7876 7877
bnx2_get_5709_media(struct bnx2 *bp)
{
7878
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7879 7880 7881 7882 7883 7884
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7885
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7886 7887 7888 7889 7890 7891 7892 7893
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

7894
	if (bp->func == 0) {
7895 7896 7897 7898
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
7899
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7900 7901 7902 7903 7904 7905 7906
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
7907
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7908 7909 7910 7911 7912
			return;
		}
	}
}

B
Bill Pemberton 已提交
7913
static void
7914 7915 7916 7917
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

7918
	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7919 7920 7921
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

7922
		bp->flags |= BNX2_FLAG_PCIX;
7923

7924
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7961
		bp->flags |= BNX2_FLAG_PCI_32BIT;
7962 7963 7964

}

B
Bill Pemberton 已提交
7965
static void
7966 7967
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
M
Matt Carlson 已提交
7968
	int rc, i, j;
7969
	u8 *data;
M
Matt Carlson 已提交
7970
	unsigned int block_end, rosize, len;
7971

M
Michael Chan 已提交
7972 7973
#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
7974 7975 7976 7977 7978 7979
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

M
Michael Chan 已提交
7980 7981
	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
7982 7983 7984
	if (rc)
		goto vpd_done;

M
Michael Chan 已提交
7985 7986 7987 7988 7989
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
7990 7991
	}

M
Matt Carlson 已提交
7992 7993 7994
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;
7995

M
Matt Carlson 已提交
7996 7997 7998
	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;
7999

M
Matt Carlson 已提交
8000 8001
	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;
8002

M
Matt Carlson 已提交
8003 8004 8005 8006
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;
8007

M
Matt Carlson 已提交
8008
	len = pci_vpd_info_field_size(&data[j]);
8009

M
Matt Carlson 已提交
8010 8011 8012 8013
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;
8014

M
Matt Carlson 已提交
8015 8016 8017 8018
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;
8019

M
Matt Carlson 已提交
8020
	len = pci_vpd_info_field_size(&data[j]);
8021

M
Matt Carlson 已提交
8022 8023
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8024
		goto vpd_done;
M
Matt Carlson 已提交
8025 8026 8027

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';
8028 8029 8030 8031 8032

vpd_done:
	kfree(data);
}

B
Bill Pemberton 已提交
8033
static int
8034 8035 8036
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
8037
	int rc, i, j;
8038
	u32 reg;
8039
	u64 dma_mask, persist_dma_mask;
8040
	int err;
8041 8042

	SET_NETDEV_DEV(dev, &pdev->dev);
M
Michael Chan 已提交
8043
	bp = netdev_priv(dev);
8044 8045 8046 8047

	bp->flags = 0;
	bp->phy_flags = 0;

8048 8049 8050 8051 8052 8053 8054 8055
	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

8056 8057 8058
	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
8059
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8060 8061 8062 8063
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8064
		dev_err(&pdev->dev,
8065
			"Cannot find PCI device base address, aborting\n");
8066 8067 8068 8069 8070 8071
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
8072
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8073 8074 8075 8076 8077
		goto err_out_disable;
	}

	pci_set_master(pdev);

8078
	bp->pm_cap = pdev->pm_cap;
8079
	if (bp->pm_cap == 0) {
8080
		dev_err(&pdev->dev,
8081
			"Cannot find power management capability, aborting\n");
8082 8083 8084 8085 8086 8087 8088 8089
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
M
Michael Chan 已提交
8090
	spin_lock_init(&bp->indirect_lock);
8091 8092 8093
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
D
David Howells 已提交
8094
	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8095

8096 8097
	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
8098
	if (!bp->regview) {
8099
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8100 8101 8102 8103 8104 8105 8106 8107
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
8108 8109 8110
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8111

8112
	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8113

8114
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8115 8116
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8117 8118 8119
			rc = -EIO;
			goto err_out_unmap;
		}
8120
		bp->flags |= BNX2_FLAG_PCIE;
8121
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8122
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8123 8124 8125

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
8126 8127
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;
8128

8129
	} else {
M
Michael Chan 已提交
8130 8131 8132
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
8133
				"Cannot find PCIX capability, aborting\n");
M
Michael Chan 已提交
8134 8135 8136
			rc = -EIO;
			goto err_out_unmap;
		}
8137
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
M
Michael Chan 已提交
8138 8139
	}

8140 8141
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8142
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8143
			bp->flags |= BNX2_FLAG_MSIX_CAP;
8144 8145
	}

8146 8147
	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8148
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8149
			bp->flags |= BNX2_FLAG_MSI_CAP;
8150 8151
	}

8152
	/* 5708 cannot support DMA addresses > 40-bit.  */
8153
	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8154
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8155
	else
8156
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8157 8158 8159 8160 8161 8162 8163

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
8164
				"pci_set_consistent_dma_mask failed, aborting\n");
8165 8166
			goto err_out_unmap;
		}
8167
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8168
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8169 8170 8171
		goto err_out_unmap;
	}

8172
	if (!(bp->flags & BNX2_FLAG_PCIE))
8173
		bnx2_get_pci_speed(bp);
8174 8175

	/* 5706A0 may falsely detect SERR and PERR. */
8176
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8177
		reg = BNX2_RD(bp, PCI_COMMAND);
8178
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8179
		BNX2_WR(bp, PCI_COMMAND, reg);
8180
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8181
		!(bp->flags & BNX2_FLAG_PCIX)) {
8182

8183
		dev_err(&pdev->dev,
8184
			"5706 A1 can only be used in a PCIX bus, aborting\n");
8185 8186 8187 8188 8189
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

8190
	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8191

8192 8193 8194
	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

8195
	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8196
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8197
		u32 off = bp->func << 2;
8198

8199
		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8200
	} else
8201 8202
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

8203 8204 8205
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
8206
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8207 8208 8209

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8210
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8211 8212 8213 8214
		rc = -ENODEV;
		goto err_out_unmap;
	}

8215 8216 8217
	bnx2_read_vpd_fw_ver(bp);

	j = strlen(bp->fw_version);
8218
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8219
	for (i = 0; i < 3 && j < 24; i++) {
8220 8221
		u8 num, k, skip0;

8222 8223 8224 8225 8226
		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
8227 8228 8229 8230 8231 8232 8233 8234 8235 8236
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
8237
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
M
Michael Chan 已提交
8238 8239 8240 8241
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8242
		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8243 8244

		for (i = 0; i < 30; i++) {
8245
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8246 8247 8248 8249 8250
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
8251
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8252 8253 8254
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8255
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8256

8257 8258 8259
		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
8260
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8261
			reg = be32_to_cpu(reg);
8262 8263 8264 8265
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
8266

8267
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8268 8269 8270
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

8271
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8272 8273 8274 8275 8276
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

8277
	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8278
	bnx2_set_rx_ring_size(bp, 255);
8279

8280
	bp->tx_quick_cons_trip_int = 2;
8281
	bp->tx_quick_cons_trip = 20;
8282
	bp->tx_ticks_int = 18;
8283
	bp->tx_ticks = 80;
8284

8285 8286
	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
8287 8288 8289
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

8290
	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8291

8292
	bp->current_interval = BNX2_TIMER_INTERVAL;
8293

M
Michael Chan 已提交
8294 8295
	bp->phy_addr = 1;

8296
	/* Disable WOL support if we are running on a SERDES chip. */
8297
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8298
		bnx2_get_5709_media(bp);
8299
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8300
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
M
Michael Chan 已提交
8301

8302
	bp->phy_port = PORT_TP;
8303
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8304
		bp->phy_port = PORT_FIBRE;
8305
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
M
Michael Chan 已提交
8306
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8307
			bp->flags |= BNX2_FLAG_NO_WOL;
M
Michael Chan 已提交
8308 8309
			bp->wol = 0;
		}
8310
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8311 8312 8313 8314 8315 8316 8317 8318
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
M
Michael Chan 已提交
8319 8320
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8321
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
M
Michael Chan 已提交
8322
		}
8323 8324
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8325
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8326 8327 8328
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8329
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8330

8331 8332
	bnx2_init_fw_cap(bp);

8333 8334 8335
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8336
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8337
		bp->flags |= BNX2_FLAG_NO_WOL;
M
Michael Chan 已提交
8338 8339
		bp->wol = 0;
	}
M
Michael Chan 已提交
8340

8341 8342 8343 8344 8345
	if (bp->flags & BNX2_FLAG_NO_WOL)
		device_set_wakeup_capable(&bp->pdev->dev, false);
	else
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

8346
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

8358 8359 8360 8361 8362 8363 8364 8365
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
8366
	 * prefers to locally disable MSI rather than globally disabling it.
8367
	 */
8368
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8369 8370 8371 8372 8373 8374
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

8375 8376
			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
8377 8378 8379 8380 8381 8382 8383
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

8384
	bnx2_set_default_link(bp);
8385 8386
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

M
Michael Chan 已提交
8387
	init_timer(&bp->timer);
8388
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
M
Michael Chan 已提交
8389 8390 8391
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

8392
#ifdef BCM_CNIC
8393 8394 8395 8396
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8397
	bp->cnic_probe = bnx2_cnic_probe;
8398
#endif
8399 8400
	pci_save_state(pdev);

8401 8402 8403
	return 0;

err_out_unmap:
8404
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8405
		pci_disable_pcie_error_reporting(pdev);
8406 8407
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}
8408

8409 8410
	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;
8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

B
Bill Pemberton 已提交
8423
static char *
8424 8425 8426 8427
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

8428
	if (bp->flags & BNX2_FLAG_PCIE) {
8429 8430 8431
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
8432
		if (bp->flags & BNX2_FLAG_PCIX)
8433
			s += sprintf(s, "-X");
8434
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8435 8436 8437 8438 8439 8440 8441 8442
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

M
Michael Chan 已提交
8443 8444 8445 8446 8447 8448 8449 8450 8451 8452
/* Unregister every NAPI context that bnx2_init_napi() added. */
static void
bnx2_del_napi(struct bnx2 *bp)
{
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++)
		netif_napi_del(&bp->bnx2_napi[vec].napi);
}

static void
8453 8454
bnx2_init_napi(struct bnx2 *bp)
{
8455
	int i;
8456

B
Benjamin Li 已提交
8457
	for (i = 0; i < bp->irq_nvecs; i++) {
8458 8459 8460 8461 8462 8463
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
8464
			poll = bnx2_poll_msix;
8465 8466

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8467 8468
		bnapi->bp = bp;
	}
8469 8470
}

8471 8472 8473 8474
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
E
Eric Dumazet 已提交
8475
	.ndo_get_stats64	= bnx2_get_stats64,
8476 8477 8478 8479 8480
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
8481 8482
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
8483
	.ndo_tx_timeout		= bnx2_tx_timeout,
A
Alexey Dobriyan 已提交
8484
#ifdef CONFIG_NET_POLL_CONTROLLER
8485 8486 8487 8488
	.ndo_poll_controller	= poll_bnx2,
#endif
};

B
Bill Pemberton 已提交
8489
static int
8490 8491 8492
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
8493
	struct net_device *dev;
8494
	struct bnx2 *bp;
8495
	int rc;
8496
	char str[40];
8497 8498

	if (version_printed++ == 0)
8499
		pr_info("%s", version);
8500 8501

	/* dev zeroed in init_etherdev */
B
Benjamin Li 已提交
8502
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8503 8504 8505 8506
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
8507 8508
	if (rc < 0)
		goto err_free;
8509

8510
	dev->netdev_ops = &bnx2_netdev_ops;
8511 8512 8513
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

M
Michael Chan 已提交
8514
	bp = netdev_priv(dev);
8515

8516 8517 8518 8519
	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);

8520 8521 8522 8523
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

8524
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8525 8526 8527
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
8528
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8529
	dev->features |= dev->hw_features;
8530
	dev->priv_flags |= IFF_UNICAST_FLT;
8531

8532
	if ((rc = register_netdev(dev))) {
8533
		dev_err(&pdev->dev, "Cannot register net device\n");
M
Michael Chan 已提交
8534
		goto error;
8535 8536
	}

8537 8538
	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
8539 8540
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8541 8542
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);
8543 8544

	return 0;
M
Michael Chan 已提交
8545 8546

error:
M
Michael Chan 已提交
8547
	pci_iounmap(pdev, bp->regview);
M
Michael Chan 已提交
8548 8549 8550
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
8551
err_free:
M
Michael Chan 已提交
8552 8553
	free_netdev(dev);
	return rc;
8554 8555
}

B
Bill Pemberton 已提交
8556
static void
8557 8558 8559
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
M
Michael Chan 已提交
8560
	struct bnx2 *bp = netdev_priv(dev);
8561 8562 8563

	unregister_netdev(dev);

8564
	del_timer_sync(&bp->timer);
8565
	cancel_work_sync(&bp->reset_task);
8566

8567
	pci_iounmap(bp->pdev, bp->regview);
8568

8569 8570
	kfree(bp->temp_stats_blk);

8571
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8572
		pci_disable_pcie_error_reporting(pdev);
8573 8574
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}
8575

8576 8577
	bnx2_release_firmware(bp);

8578
	free_netdev(dev);
8579

8580 8581 8582 8583 8584 8585
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
M
Michael Chan 已提交
8586
bnx2_suspend(struct device *device)
8587
{
M
Michael Chan 已提交
8588
	struct pci_dev *pdev = to_pci_dev(device);
8589
	struct net_device *dev = pci_get_drvdata(pdev);
M
Michael Chan 已提交
8590
	struct bnx2 *bp = netdev_priv(dev);
8591

M
Michael Chan 已提交
8592 8593 8594 8595 8596 8597 8598 8599 8600 8601
	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
8602 8603 8604 8605
	return 0;
}

static int
M
Michael Chan 已提交
8606
bnx2_resume(struct device *device)
8607
{
M
Michael Chan 已提交
8608
	struct pci_dev *pdev = to_pci_dev(device);
8609
	struct net_device *dev = pci_get_drvdata(pdev);
M
Michael Chan 已提交
8610
	struct bnx2 *bp = netdev_priv(dev);
8611 8612 8613 8614

	if (!netif_running(dev))
		return 0;

8615
	bnx2_set_power_state(bp, PCI_D0);
8616
	netif_device_attach(dev);
M
Michael Chan 已提交
8617
	bnx2_request_irq(bp);
8618
	bnx2_init_nic(bp, 1);
8619
	bnx2_netif_start(bp, true);
8620 8621 8622
	return 0;
}

M
Michael Chan 已提交
8623 8624 8625 8626 8627 8628 8629 8630 8631
/* Wire up suspend/resume only when the kernel supports system sleep;
 * otherwise the driver registers no PM callbacks (BNX2_PM_OPS == NULL).
 */
#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
W
Wendy Xiong 已提交
8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* A permanent failure cannot be recovered; tell the core to
	 * disconnect the device.
	 */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
8677 8678
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;
W
Wendy Xiong 已提交
8679 8680 8681 8682

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
8683
			"Cannot re-enable PCI device after reset\n");
8684 8685 8686 8687 8688
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

M
Michael Chan 已提交
8689
		if (netif_running(dev))
8690
			err = bnx2_init_nic(bp, 1);
M
Michael Chan 已提交
8691

8692 8693 8694 8695 8696 8697 8698
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
W
Wendy Xiong 已提交
8699
	}
8700
	rtnl_unlock();
W
Wendy Xiong 已提交
8701

8702
	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8703 8704
		return result;

8705 8706 8707 8708 8709
	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
W
Wendy Xiong 已提交
8710 8711
	}

8712
	return result;
W
Wendy Xiong 已提交
8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
8729
		bnx2_netif_start(bp, true);
W
Wendy Xiong 已提交
8730 8731 8732 8733 8734

	netif_device_attach(dev);
	rtnl_unlock();
}

M
Michael Chan 已提交
8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756
/* PCI shutdown callback: close the interface and, when the system is
 * actually powering off (as opposed to rebooting), drop the chip into
 * D3hot so configured wake-on-LAN can work.
 */
static void bnx2_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp;

	/* Shutdown may be invoked before probe finished setting drvdata. */
	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	if (netif_running(dev))
		dev_close(bp->dev);

	if (system_state == SYSTEM_POWER_OFF)
		bnx2_set_power_state(bp, PCI_D3hot);

	rtnl_unlock();
}

M
Michael Chan 已提交
8757
static const struct pci_error_handlers bnx2_err_handler = {
W
Wendy Xiong 已提交
8758 8759 8760 8761 8762
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

8763
static struct pci_driver bnx2_pci_driver = {
8764 8765 8766
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
B
Bill Pemberton 已提交
8767
	.remove		= bnx2_remove_one,
M
Michael Chan 已提交
8768
	.driver.pm	= BNX2_PM_OPS,
W
Wendy Xiong 已提交
8769
	.err_handler	= &bnx2_err_handler,
M
Michael Chan 已提交
8770
	.shutdown	= bnx2_shutdown,
8771 8772
};

8773
/* Boilerplate: registers bnx2_pci_driver on module load and
 * unregisters it on unload. */
module_pci_driver(bnx2_pci_driver);