/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
41
#include <linux/if_vlan.h>
E
Eliezer Tamir 已提交
42 43 44
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
45
#include <net/ip6_checksum.h>
E
Eliezer Tamir 已提交
46 47
#include <linux/workqueue.h>
#include <linux/crc32.h>
48
#include <linux/crc32c.h>
E
Eliezer Tamir 已提交
49 50 51
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
B
Ben Hutchings 已提交
52
#include <linux/stringify.h>
E
Eliezer Tamir 已提交
53

54
#define BNX2X_MAIN
E
Eliezer Tamir 已提交
55 56
#include "bnx2x.h"
#include "bnx2x_init.h"
57
#include "bnx2x_init_ops.h"
D
Dmitry Kravkov 已提交
58
#include "bnx2x_cmn.h"
E
Eliezer Tamir 已提交
59 60


61 62 63
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
B
Ben Hutchings 已提交
64 65 66 67 68 69 70
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
71

72 73
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
E
Eliezer Tamir 已提交
74

A
Andrew Morton 已提交
75
static char version[] __devinitdata =
76
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
E
Eliezer Tamir 已提交
77 78
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

79
MODULE_AUTHOR("Eliezer Tamir");
80
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
E
Eliezer Tamir 已提交
81 82
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
B
Ben Hutchings 已提交
83 84
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
E
Eliezer Tamir 已提交
85

E
Eilon Greenstein 已提交
86 87
static int multi_mode = 1;
module_param(multi_mode, int, 0);
E
Eilon Greenstein 已提交
88 89 90
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

91 92 93 94
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");
E
Eilon Greenstein 已提交
95

96 97
static int disable_tpa;
module_param(disable_tpa, int, 0);
98
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
E
Eilon Greenstein 已提交
99 100 101

static int int_mode;
module_param(int_mode, int, 0);
V
Vladislav Zolotarov 已提交
102 103
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
				"(1 INT#x; 2 MSI)");
E
Eilon Greenstein 已提交
104

105 106 107 108
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

109
static int poll;
E
Eliezer Tamir 已提交
110
module_param(poll, int, 0);
111
MODULE_PARM_DESC(poll, " Use polling (for debug)");
112 113 114 115 116

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

117
static int debug;
E
Eliezer Tamir 已提交
118
module_param(debug, int, 0);
119 120
MODULE_PARM_DESC(debug, " Default debug msglevel");

121
static struct workqueue_struct *bnx2x_wq;
E
Eliezer Tamir 已提交
122 123 124

enum bnx2x_board_type {
	BCM57710 = 0,
125 126
	BCM57711 = 1,
	BCM57711E = 2,
E
Eliezer Tamir 已提交
127 128
};

129
/* indexed by board_type, above */
A
Andrew Morton 已提交
130
static struct {
E
Eliezer Tamir 已提交
131 132
	char *name;
} board_info[] __devinitdata = {
133 134 135
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
E
Eliezer Tamir 已提交
136 137
};

138

139
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
E
Eilon Greenstein 已提交
140 141 142
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
E
Eliezer Tamir 已提交
143 144 145 146 147 148 149 150 151 152 153 154
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
155
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
E
Eliezer Tamir 已提交
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

175
const u32 dmae_reg_go_c[] = {
E
Eliezer Tamir 已提交
176 177 178 179 180 181 182
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
183
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
E
Eliezer Tamir 已提交
184 185 186 187 188 189 190 191
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

192 193
		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
E
Eliezer Tamir 已提交
194 195 196 197
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

198 199
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
E
Eliezer Tamir 已提交
200
{
201
	struct dmae_command dmae;
E
Eliezer Tamir 已提交
202
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
203 204 205 206 207 208 209 210 211 212 213
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

214
	memset(&dmae, 0, sizeof(struct dmae_command));
E
Eliezer Tamir 已提交
215

216 217 218
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
E
Eliezer Tamir 已提交
219
#ifdef __BIG_ENDIAN
220
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
E
Eliezer Tamir 已提交
221
#else
222
		       DMAE_CMD_ENDIANITY_DW_SWAP |
E
Eliezer Tamir 已提交
223
#endif
224 225 226 227 228 229 230 231 232 233
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;
E
Eliezer Tamir 已提交
234

E
Eilon Greenstein 已提交
235
	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
E
Eliezer Tamir 已提交
236 237 238
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
239 240 241
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
E
Eliezer Tamir 已提交
243 244 245
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

246 247
	mutex_lock(&bp->dmae_mutex);

E
Eliezer Tamir 已提交
248 249
	*wb_comp = 0;

250
	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
E
Eliezer Tamir 已提交
251 252

	udelay(5);
253 254 255 256 257

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
E
Eilon Greenstein 已提交
258
			BNX2X_ERR("DMAE timeout!\n");
E
Eliezer Tamir 已提交
259 260
			break;
		}
261
		cnt--;
Y
Yitchak Gertner 已提交
262 263 264 265 266
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
E
Eliezer Tamir 已提交
267
	}
268 269

	mutex_unlock(&bp->dmae_mutex);
E
Eliezer Tamir 已提交
270 271
}

Y
Yaniv Rosner 已提交
272
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
E
Eliezer Tamir 已提交
273
{
274
	struct dmae_command dmae;
E
Eliezer Tamir 已提交
275
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
276 277 278 279 280 281 282 283 284 285 286 287 288
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

289
	memset(&dmae, 0, sizeof(struct dmae_command));
E
Eliezer Tamir 已提交
290

291 292 293
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
E
Eliezer Tamir 已提交
294
#ifdef __BIG_ENDIAN
295
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
E
Eliezer Tamir 已提交
296
#else
297
		       DMAE_CMD_ENDIANITY_DW_SWAP |
E
Eliezer Tamir 已提交
298
#endif
299 300 301 302 303 304 305 306 307 308
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;
E
Eliezer Tamir 已提交
309

E
Eilon Greenstein 已提交
310
	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
E
Eliezer Tamir 已提交
311 312 313
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
314 315 316
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
E
Eliezer Tamir 已提交
317

318 319 320
	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
E
Eliezer Tamir 已提交
321 322
	*wb_comp = 0;

323
	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
E
Eliezer Tamir 已提交
324 325

	udelay(5);
326 327 328 329

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
E
Eilon Greenstein 已提交
330
			BNX2X_ERR("DMAE timeout!\n");
E
Eliezer Tamir 已提交
331 332
			break;
		}
333
		cnt--;
Y
Yitchak Gertner 已提交
334 335 336 337 338
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
E
Eliezer Tamir 已提交
339
	}
340
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
E
Eliezer Tamir 已提交
341 342
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
343 344 345 346

	mutex_unlock(&bp->dmae_mutex);
}

347 348 349
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
350
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
351 352
	int offset = 0;

353
	while (len > dmae_wr_max) {
354
		bnx2x_write_dmae(bp, phys_addr + offset,
355 356 357
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
358 359 360 361 362
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

363 364 365 366 367 368 369 370
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
E
Eliezer Tamir 已提交
371 372
}

373 374 375 376 377 378 379 380 381 382 383
#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

E
Eliezer Tamir 已提交
384 385 386
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
E
Eliezer Tamir 已提交
499 500
		}
	}
501

E
Eliezer Tamir 已提交
502 503
	return rc;
}
E
Eliezer Tamir 已提交
504

E
Eliezer Tamir 已提交
505 506
static void bnx2x_fw_dump(struct bnx2x *bp)
{
V
Vladislav Zolotarov 已提交
507
	u32 addr;
E
Eliezer Tamir 已提交
508
	u32 mark, offset;
509
	__be32 data[9];
E
Eliezer Tamir 已提交
510 511
	int word;

512 513 514 515
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
V
Vladislav Zolotarov 已提交
516 517 518 519

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520
	pr_err("begin fw dump (mark 0x%x)\n", mark);
E
Eliezer Tamir 已提交
521

522
	pr_err("");
V
Vladislav Zolotarov 已提交
523
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
E
Eliezer Tamir 已提交
524
		for (word = 0; word < 8; word++)
V
Vladislav Zolotarov 已提交
525
			data[word] = htonl(REG_RD(bp, offset + 4*word));
E
Eliezer Tamir 已提交
526
		data[8] = 0x0;
527
		pr_cont("%s", (char *)data);
E
Eliezer Tamir 已提交
528
	}
V
Vladislav Zolotarov 已提交
529
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
E
Eliezer Tamir 已提交
530
		for (word = 0; word < 8; word++)
V
Vladislav Zolotarov 已提交
531
			data[word] = htonl(REG_RD(bp, offset + 4*word));
E
Eliezer Tamir 已提交
532
		data[8] = 0x0;
533
		pr_cont("%s", (char *)data);
E
Eliezer Tamir 已提交
534
	}
535
	pr_err("end of fw dump\n");
E
Eliezer Tamir 已提交
536 537
}

538
void bnx2x_panic_dump(struct bnx2x *bp)
E
Eliezer Tamir 已提交
539 540 541 542
{
	int i;
	u16 j, start, end;

Y
Yitchak Gertner 已提交
543 544 545
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

E
Eliezer Tamir 已提交
546 547
	BNX2X_ERR("begin crash dump -----------------\n");

E
Eilon Greenstein 已提交
548 549
	/* Indices */
	/* Common */
V
Vladislav Zolotarov 已提交
550 551 552
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
E
Eilon Greenstein 已提交
553 554 555 556
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
557
	for_each_queue(bp, i) {
E
Eliezer Tamir 已提交
558 559
		struct bnx2x_fastpath *fp = &bp->fp[i];

V
Vladislav Zolotarov 已提交
560 561 562
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
E
Eilon Greenstein 已提交
563
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
Y
Yitchak Gertner 已提交
564 565
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
V
Vladislav Zolotarov 已提交
566 567
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
E
Eilon Greenstein 已提交
568 569 570 571
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}
E
Eliezer Tamir 已提交
572

E
Eilon Greenstein 已提交
573
	/* Tx */
574
	for_each_queue(bp, i) {
E
Eilon Greenstein 已提交
575
		struct bnx2x_fastpath *fp = &bp->fp[i];
E
Eliezer Tamir 已提交
576

V
Vladislav Zolotarov 已提交
577 578 579
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
E
Eilon Greenstein 已提交
580 581
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
V
Vladislav Zolotarov 已提交
582 583
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
E
Eilon Greenstein 已提交
584
			  fp->status_blk->c_status_block.status_block_index,
E
Eilon Greenstein 已提交
585
			  fp->tx_db.data.prod);
E
Eilon Greenstein 已提交
586
	}
E
Eliezer Tamir 已提交
587

E
Eilon Greenstein 已提交
588 589
	/* Rings */
	/* Rx */
590
	for_each_queue(bp, i) {
E
Eilon Greenstein 已提交
591
		struct bnx2x_fastpath *fp = &bp->fp[i];
E
Eliezer Tamir 已提交
592 593 594

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
E
Eilon Greenstein 已提交
595
		for (j = start; j != end; j = RX_BD(j + 1)) {
E
Eliezer Tamir 已提交
596 597 598
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

E
Eilon Greenstein 已提交
599 600
			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
E
Eliezer Tamir 已提交
601 602
		}

603 604
		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
E
Eilon Greenstein 已提交
605
		for (j = start; j != end; j = RX_SGE(j + 1)) {
606 607 608
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

E
Eilon Greenstein 已提交
609 610
			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
611 612
		}

E
Eliezer Tamir 已提交
613 614
		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
E
Eilon Greenstein 已提交
615
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
E
Eliezer Tamir 已提交
616 617
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

E
Eilon Greenstein 已提交
618 619
			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
E
Eliezer Tamir 已提交
620 621 622
		}
	}

E
Eilon Greenstein 已提交
623
	/* Tx */
624
	for_each_queue(bp, i) {
E
Eilon Greenstein 已提交
625 626 627 628 629 630 631
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

E
Eilon Greenstein 已提交
632 633
			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
E
Eilon Greenstein 已提交
634 635 636 637 638 639 640
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

E
Eilon Greenstein 已提交
641 642
			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
E
Eilon Greenstein 已提交
643 644
		}
	}
E
Eliezer Tamir 已提交
645

646
	bnx2x_fw_dump(bp);
E
Eliezer Tamir 已提交
647 648 649 650
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

D
Dmitry Kravkov 已提交
651
void bnx2x_int_enable(struct bnx2x *bp)
E
Eliezer Tamir 已提交
652
{
653
	int port = BP_PORT(bp);
E
Eliezer Tamir 已提交
654 655 656
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
E
Eilon Greenstein 已提交
657
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
E
Eliezer Tamir 已提交
658 659

	if (msix) {
E
Eilon Greenstein 已提交
660 661
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
E
Eliezer Tamir 已提交
662 663
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
E
Eilon Greenstein 已提交
664 665 666 667 668
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
E
Eliezer Tamir 已提交
669 670
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
E
Eliezer Tamir 已提交
671
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
E
Eliezer Tamir 已提交
672 673
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
E
Eliezer Tamir 已提交
674

E
Eilon Greenstein 已提交
675 676
		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);
E
Eliezer Tamir 已提交
677 678 679

		REG_WR(bp, addr, val);

E
Eliezer Tamir 已提交
680 681 682
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

E
Eilon Greenstein 已提交
683 684
	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
E
Eliezer Tamir 已提交
685 686

	REG_WR(bp, addr, val);
E
Eilon Greenstein 已提交
687 688 689 690 691
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();
692 693 694 695

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
E
Eilon Greenstein 已提交
696
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
697
			if (bp->port.pmf)
E
Eilon Greenstein 已提交
698 699
				/* enable nig and gpio3 attention */
				val |= 0x1100;
700 701 702 703 704 705
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
E
Eilon Greenstein 已提交
706 707 708

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
E
Eliezer Tamir 已提交
709 710
}

E
Eliezer Tamir 已提交
711
static void bnx2x_int_disable(struct bnx2x *bp)
E
Eliezer Tamir 已提交
712
{
713
	int port = BP_PORT(bp);
E
Eliezer Tamir 已提交
714 715 716 717 718 719 720 721 722 723 724
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

E
Eilon Greenstein 已提交
725 726 727
	/* flush all outstanding writes */
	mmiowb();

E
Eliezer Tamir 已提交
728 729 730 731 732
	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

D
Dmitry Kravkov 已提交
733
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
E
Eliezer Tamir 已提交
734 735
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
E
Eilon Greenstein 已提交
736
	int i, offset;
E
Eliezer Tamir 已提交
737

738
	/* disable interrupt handling */
E
Eliezer Tamir 已提交
739
	atomic_inc(&bp->intr_sem);
E
Eilon Greenstein 已提交
740 741
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

Y
Yitchak Gertner 已提交
742 743 744
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);
E
Eliezer Tamir 已提交
745 746 747

	/* make sure all ISRs are done */
	if (msix) {
E
Eilon Greenstein 已提交
748 749
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
750 751 752
#ifdef BCM_CNIC
		offset++;
#endif
E
Eliezer Tamir 已提交
753
		for_each_queue(bp, i)
E
Eilon Greenstein 已提交
754
			synchronize_irq(bp->msix_table[i + offset].vector);
E
Eliezer Tamir 已提交
755 756 757 758
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
759 760
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
E
Eliezer Tamir 已提交
761 762
}

763
/* fast path */
E
Eliezer Tamir 已提交
764 765

/*
766
 * General service functions
E
Eliezer Tamir 已提交
767 768
 */

769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802
/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

E
Eliezer Tamir 已提交
803

804 805 806
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
807

D
Dmitry Kravkov 已提交
808
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
E
Eliezer Tamir 已提交
809 810 811 812 813 814
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

815
	DP(BNX2X_MSG_SP,
E
Eliezer Tamir 已提交
816
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
817
	   fp->index, cid, command, bp->state,
818
	   rr_cqe->ramrod_cqe.ramrod_type);
E
Eliezer Tamir 已提交
819 820 821

	bp->spq_left++;

822
	if (fp->index) {
E
Eliezer Tamir 已提交
823 824 825 826 827 828 829 830 831 832 833 834 835 836 837
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
838
			BNX2X_ERR("unexpected MC reply (%d)  "
V
Vladislav Zolotarov 已提交
839 840
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
841
			break;
E
Eliezer Tamir 已提交
842
		}
843
		mb(); /* force bnx2x_wait_ramrod() to see the change */
E
Eliezer Tamir 已提交
844 845
		return;
	}
E
Eliezer Tamir 已提交
846

E
Eliezer Tamir 已提交
847 848 849 850 851 852 853 854 855 856 857 858 859
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
860
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
861
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
E
Eliezer Tamir 已提交
862 863
		break;

864 865 866 867 868 869
#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif
870

E
Eliezer Tamir 已提交
871
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
E
Eliezer Tamir 已提交
873
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 875
		bp->set_mac_pending--;
		smp_wmb();
E
Eliezer Tamir 已提交
876 877
		break;

878
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 881
		bp->set_mac_pending--;
		smp_wmb();
882 883
		break;

E
Eliezer Tamir 已提交
884
	default:
885
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
E
Eliezer Tamir 已提交
886
			  command, bp->state);
887
		break;
E
Eliezer Tamir 已提交
888
	}
889
	mb(); /* force bnx2x_wait_ramrod() to see the change */
E
Eliezer Tamir 已提交
890 891
}

D
Dmitry Kravkov 已提交
892
/*
 * bnx2x_interrupt - legacy INTx/shared interrupt handler.
 *
 * Acks the chip, then dispatches: per-queue status bits schedule NAPI,
 * the CNIC bit (if BCM_CNIC) invokes the registered cnic handler, and
 * bit 0 kicks the slowpath task.  Returns IRQ_NONE only when the acked
 * status is 0 (shared interrupt not ours).
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each fastpath status block owns one bit above bit 0 */
		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* cnic_ops is RCU-protected; call handler under read lock */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0 = slowpath events; defer to the slowpath workqueue */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

Y
Yaniv Rosner 已提交
964
/* end of fast path */
E
Eliezer Tamir 已提交
965 966


Y
Yaniv Rosner 已提交
967 968 969 970 971
/* Link */

/*
 * General service functions
 */
E
Eliezer Tamir 已提交
972

D
Dmitry Kravkov 已提交
973
/*
 * bnx2x_acquire_hw_lock - acquire a HW resource lock shared between
 * driver instances/functions via the MISC driver-control registers.
 *
 * Polls up to 5 seconds (1000 tries, 5 ms apart).
 * Returns 0 on success, -EINVAL for an out-of-range resource,
 * -EEXIST if this function already holds the bit, -EAGAIN on timeout.
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* functions 0-5 and 6-7 use different control register banks */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock: write to the "set" register,
		   then read back to see if the HW granted the bit */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
E
Eliezer Tamir 已提交
1017

D
Dmitry Kravkov 已提交
1018
/*
 * bnx2x_release_hw_lock - release a HW resource lock previously taken
 * by bnx2x_acquire_hw_lock().
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource,
 * -EFAULT if the lock bit is not currently held.
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* same register-bank split as in bnx2x_acquire_hw_lock() */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	/* writing the bit to the base register clears (releases) it */
	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
E
Eliezer Tamir 已提交
1053

D
Dmitry Kravkov 已提交
1054

E
Eilon Greenstein 已提交
1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084
/*
 * bnx2x_get_gpio - read the current value of one GPIO pin.
 *
 * The pin index is adjusted for port swap (NIG swap register + strap
 * override).  Returns 0 or 1 for the pin value, -EINVAL for an invalid
 * GPIO number.
 */
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

1085
/*
 * bnx2x_set_gpio - drive one GPIO pin low/high or float it (input).
 *
 * Takes the GPIO HW lock around the read-modify-write of MISC_REG_GPIO.
 * @mode is one of MISC_REGISTERS_GPIO_OUTPUT_LOW / OUTPUT_HIGH /
 * INPUT_HI_Z.  Returns 0 on success, -EINVAL for an invalid GPIO number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

E
Eilon Greenstein 已提交
1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183
/*
 * bnx2x_set_gpio_int - configure the interrupt (SET/CLR) state of one
 * GPIO pin, under the GPIO HW lock.
 *
 * @mode is MISC_REGISTERS_GPIO_INT_OUTPUT_CLR or _OUTPUT_SET.
 * Returns 0 on success, -EINVAL for an invalid GPIO number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

Y
Yaniv Rosner 已提交
1184
/*
 * bnx2x_set_spio - drive one SPIO (shared/system GPIO) pin low/high or
 * float it, under the SPIO HW lock.
 *
 * Only SPIO_4..SPIO_7 are settable.  @mode is one of
 * MISC_REGISTERS_SPIO_OUTPUT_LOW / OUTPUT_HIGH / INPUT_HI_Z.
 * Returns 0 on success, -EINVAL for an out-of-range SPIO number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

D
Dmitry Kravkov 已提交
1230
/*
 * bnx2x_calc_fc_adv - translate the negotiated IEEE pause setting in
 * bp->link_vars.ieee_fc into the ethtool ADVERTISED_Pause /
 * ADVERTISED_Asym_Pause bits of bp->port.advertising.
 */
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		/* anything else advertises no pause capability */
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
E
Eliezer Tamir 已提交
1254

Y
Yaniv Rosner 已提交
1255

D
Dmitry Kravkov 已提交
1256
/*
 * bnx2x_initial_phy_init - first PHY/link bring-up after load.
 *
 * Chooses TX-only flow control for jumbo MTU (> 5000), sets loopback
 * mode for LOAD_DIAG, runs bnx2x_phy_init() under the PHY lock and
 * recomputes the advertised pause bits.  Returns the bnx2x_phy_init()
 * result, or -EINVAL when there is no bootcode (BP_NOMCP).
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up here */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

D
Dmitry Kravkov 已提交
1291
/*
 * bnx2x_link_set - (re)apply the current link parameters to the PHY,
 * under the PHY lock, then refresh the advertised pause bits.
 * No-op (with an error log) when bootcode is missing.
 */
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
E
Eliezer Tamir 已提交
1302

Y
Yaniv Rosner 已提交
1303 1304
/*
 * bnx2x__link_reset - bring the link down via bnx2x_link_reset(),
 * under the PHY lock.  No-op (with an error log) when bootcode is
 * missing.
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
E
Eliezer Tamir 已提交
1312

D
Dmitry Kravkov 已提交
1313
/*
 * bnx2x_link_test - run the PHY link self-test under the PHY lock.
 * Returns the bnx2x_test_link() result, or 0 (with an error log)
 * when bootcode is missing.
 */
u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
E
Eliezer Tamir 已提交
1326

E
Eilon Greenstein 已提交
1327
/*
 * bnx2x_init_port_minmax - initialize the per-port rate-shaping and
 * fairness contexts (bp->cmng.rs_vars / fair_vars) from the current
 * line speed.  Called when the link comes up and on min/max updates.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;	/* bytes per usec */
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the later case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* function number = 2*vn + port (E1H layout) */
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

E
Eilon Greenstein 已提交
1408
/*
 * bnx2x_init_vn_minmax - program the per-VN rate-shaping and fairness
 * contexts into XSTORM internal memory for function @func, based on the
 * min/max bandwidth configured in shared memory (mf_cfg).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* configured values are in 100 Mbps units */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

E
Eilon Greenstein 已提交
1470

Y
Yaniv Rosner 已提交
1471 1472 1473
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell USTORM whether pause frames are enabled */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
E
Eliezer Tamir 已提交
1544

D
Dmitry Kravkov 已提交
1545
/*
 * bnx2x__link_status_update - refresh cached link state from the PHY
 * layer, kick the statistics state machine accordingly, recompute the
 * VN weight sum and report the link status.  Skipped unless the device
 * is open and the function is not disabled (MF_FUNC_DIS).
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574
/*
 * bnx2x_pmf_update - this function becomes the Port Management Function:
 * mark pmf, enable NIG attention for this VN in the HC edge registers
 * and notify the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

Y
Yaniv Rosner 已提交
1579
/* end of Link */
E
Eliezer Tamir 已提交
1580 1581 1582 1583 1584 1585 1586

/* slow path */

/*
 * General service functions
 */

1587 1588 1589 1590 1591 1592 1593 1594 1595
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;	/* sequence number tags this request */
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;	/* ms per poll */

	/* serialize access to the firmware mailbox */
	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

/*
 * bnx2x_e1h_disable - quiesce an E1H function: stop the TX queues,
 * disable the LLH for this port and drop the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

/*
 * bnx2x_e1h_enable - re-enable an E1H function that was disabled by
 * bnx2x_e1h_disable(): re-enable the LLH and wake the TX queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

/*
 * bnx2x_update_min_max - recompute and reprogram the port/VN min-max
 * (rate shaping + fairness) configuration; if this function is the PMF,
 * also raise the link-sync attention for the other VNs on the port and
 * write the cmng struct to XSTORM internal memory.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

/*
 * bnx2x_dcc_event - handle a Driver Configuration Change notification
 * from the MCP: enable/disable this PF and/or reapply bandwidth
 * allocation, then ack the result (OK or FAILURE) back to the MCP.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

M
Michael Chan 已提交
1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751
/* must be called under the spq lock */
/* Returns the current producer BD and advances the SPQ producer,
   wrapping back to the start of the ring at spq_last_bd. */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		/* wrap around to the beginning of the ring */
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
/* Publishes the new SPQ producer index to XSTORM internal memory. */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* flush the posted write before releasing the spq lock */
	mmiowb();
}

E
Eliezer Tamir 已提交
1752
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * bnx2x_sp_post - post one slowpath (ramrod) element on the SPQ.
 *
 * Fills a SPQ entry with @command/@cid and the data address, consumes
 * one spq_left credit and rings the producer doorbell, all under
 * spq_lock.  @common marks a common (non per-connection) ramrod.
 * Returns 0 on success, -EBUSY (after bnx2x_panic) when the ring is
 * full, -EIO when panicked under BNX2X_STOP_ON_ERROR.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded int it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	/* consume one SPQ credit; returned on ramrod completion */
	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
Y
Yitchak Gertner 已提交
1801
static int bnx2x_acquire_alr(struct bnx2x *bp)
E
Eliezer Tamir 已提交
1802
{
1803
	u32 j, val;
1804
	int rc = 0;
E
Eliezer Tamir 已提交
1805 1806

	might_sleep();
1807
	for (j = 0; j < 1000; j++) {
E
Eliezer Tamir 已提交
1808 1809 1810 1811 1812 1813 1814 1815 1816
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
1817
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
E
Eliezer Tamir 已提交
1818 1819 1820 1821 1822 1823
		rc = -EBUSY;
	}

	return rc;
}

Y
Yitchak Gertner 已提交
1824 1825
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
E
Eliezer Tamir 已提交
1826
{
1827
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
E
Eliezer Tamir 已提交
1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864
}

/* Refresh the driver's cached copies of the default status block
 * indices.  Returns a bitmask of which indices changed:
 *   1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM.
 * A non-zero result means the chip posted new slow-path events. */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

/*
 * Handle newly asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service the hard-wired sources (NIG/link,
 * GPIOs, general attentions) and finally ack them towards the HC.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit cannot be asserted while we already track it as asserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU (shared with MCP) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear the corresponding AEU general-attention register */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

E
Eilon Greenstein 已提交
1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
V
Vladislav Zolotarov 已提交
1970 1971 1972
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact OEM Support for assistance\n");
E
Eilon Greenstein 已提交
1973
}
1974

1975
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
E
Eliezer Tamir 已提交
1976
{
1977
	int port = BP_PORT(bp);
1978
	int reg_offset;
E
Eilon Greenstein 已提交
1979
	u32 val, swap_val, swap_override;
1980

1981 1982
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1983

1984
	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1985 1986 1987 1988 1989 1990 1991

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

E
Eilon Greenstein 已提交
1992
		/* Fan failure attention */
1993 1994
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1995
			/* Low power mode is controlled by GPIO 2 */
1996
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1997
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
E
Eilon Greenstein 已提交
1998 1999 2000
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2001 2002
			break;

E
Eilon Greenstein 已提交
2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

2014 2015 2016
		default:
			break;
		}
E
Eilon Greenstein 已提交
2017
		bnx2x_fan_failure(bp);
2018
	}
2019

E
Eilon Greenstein 已提交
2020 2021 2022 2023 2024 2025 2026
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

2027 2028 2029 2030 2031 2032 2033
	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2034
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2035 2036
		bnx2x_panic();
	}
2037 2038 2039 2040 2041 2042
}

/*
 * Service group-1 deasserted attentions: doorbell queue (DORQ) errors
 * and fatal HW-block set-1 errors.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits, report and panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

/*
 * Service group-2 deasserted attentions: CFC and PXP errors and fatal
 * HW-block set-2 errors.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits, report and panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

/*
 * Service group-3 deasserted attentions: Everest general attentions
 * (PMF link event, MC/MCP asserts) and latched attentions (GRC
 * timeout/reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197
#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << 16);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
D
Dmitry Kravkov 已提交
2198
bool bnx2x_reset_is_done(struct bnx2x *bp)
2199 2200 2201 2202 2203 2204 2205 2206 2207
{
	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}

/*
 * should be run under rtnl lock
 */
D
Dmitry Kravkov 已提交
2208
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
D
Dmitry Kravkov 已提交
2223
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	u32 gen_reg = REG_RD(bp, BNX2X_MISC_GEN_REG);

	/* the low LOAD_COUNTER_BITS bits hold the load counter */
	return gen_reg & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 gen_reg = REG_RD(bp, BNX2X_MISC_GEN_REG);

	/* zero the counter bits, keep the reset-flag bits untouched */
	REG_WR(bp, BNX2X_MISC_GEN_REG, gen_reg & (~LOAD_COUNTER_MASK));
}

/* Print one block name, prefixing a comma separator for every entry
 * after the first (idx > 0). */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx != 0)
		pr_cont(", ");
	pr_cont("%s", blk);
}

/* Print the names of the set-0 blocks flagged in @sig, continuing the
 * running list at @par_num; returns the updated count. */
static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	u32 bit;
	int i;

	/* sig is cleared bit by bit, so the loop stops as soon as no
	 * flagged block remains */
	for (i = 0; sig; i++) {
		bit = (u32)0x1 << i;
		if (!(sig & bit))
			continue;

		switch (bit) {
		case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
			_print_next_block(par_num++, "BRB");
			break;
		case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
			_print_next_block(par_num++, "PARSER");
			break;
		case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
			_print_next_block(par_num++, "TSDM");
			break;
		case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
			_print_next_block(par_num++, "SEARCHER");
			break;
		case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
			_print_next_block(par_num++, "TSEMI");
			break;
		}

		/* Clear the bit */
		sig &= ~bit;
	}

	return par_num;
}

/* Print the names of the set-1 blocks flagged in @sig, continuing the
 * running list at @par_num; returns the updated count. */
static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	u32 bit;
	int i;

	/* sig is cleared bit by bit, so the loop stops as soon as no
	 * flagged block remains */
	for (i = 0; sig; i++) {
		bit = (u32)0x1 << i;
		if (!(sig & bit))
			continue;

		switch (bit) {
		case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
			_print_next_block(par_num++, "PBCLIENT");
			break;
		case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
			_print_next_block(par_num++, "QM");
			break;
		case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
			_print_next_block(par_num++, "XSDM");
			break;
		case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
			_print_next_block(par_num++, "XSEMI");
			break;
		case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
			_print_next_block(par_num++, "DOORBELLQ");
			break;
		case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
			_print_next_block(par_num++, "VAUX PCI CORE");
			break;
		case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
			_print_next_block(par_num++, "DEBUG");
			break;
		case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
			_print_next_block(par_num++, "USDM");
			break;
		case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
			_print_next_block(par_num++, "USEMI");
			break;
		case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
			_print_next_block(par_num++, "UPB");
			break;
		case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
			_print_next_block(par_num++, "CSDM");
			break;
		}

		/* Clear the bit */
		sig &= ~bit;
	}

	return par_num;
}

/* Print the names of the set-2 blocks flagged in @sig, continuing the
 * running list at @par_num; returns the updated count. */
static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	u32 bit;
	int i;

	/* sig is cleared bit by bit, so the loop stops as soon as no
	 * flagged block remains */
	for (i = 0; sig; i++) {
		bit = (u32)0x1 << i;
		if (!(sig & bit))
			continue;

		switch (bit) {
		case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
			_print_next_block(par_num++, "CSEMI");
			break;
		case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
			_print_next_block(par_num++, "PXP");
			break;
		case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
			_print_next_block(par_num++,
				"PXPPCICLOCKCLIENT");
			break;
		case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
			_print_next_block(par_num++, "CFC");
			break;
		case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
			_print_next_block(par_num++, "CDU");
			break;
		case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
			_print_next_block(par_num++, "IGU");
			break;
		case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
			_print_next_block(par_num++, "MISC");
			break;
		}

		/* Clear the bit */
		sig &= ~bit;
	}

	return par_num;
}

/* Print the names of the set-3 (MCP latched) blocks flagged in @sig,
 * continuing the running list at @par_num; returns the updated count. */
static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	u32 bit;
	int i;

	/* sig is cleared bit by bit, so the loop stops as soon as no
	 * flagged block remains */
	for (i = 0; sig; i++) {
		bit = (u32)0x1 << i;
		if (!(sig & bit))
			continue;

		switch (bit) {
		case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
			_print_next_block(par_num++, "MCP ROM");
			break;
		case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
			_print_next_block(par_num++, "MCP UMP RX");
			break;
		case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
			_print_next_block(par_num++, "MCP UMP TX");
			break;
		case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
			_print_next_block(par_num++, "MCP SCPAD");
			break;
		}

		/* Clear the bit */
		sig &= ~bit;
	}

	return par_num;
}

/* Report whether any of the four attention signals carries a parity
 * error; if so, log the affected blocks and return true. */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	int par_num;

	/* no parity bit set in any signal group -> nothing to report */
	if (!(sig0 & HW_PRTY_ASSERT_SET_0) &&
	    !(sig1 & HW_PRTY_ASSERT_SET_1) &&
	    !(sig2 & HW_PRTY_ASSERT_SET_2) &&
	    !(sig3 & HW_PRTY_ASSERT_SET_3))
		return false;

	DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
		"[0]:0x%08x [1]:0x%08x "
		"[2]:0x%08x [3]:0x%08x\n",
		  sig0 & HW_PRTY_ASSERT_SET_0,
		  sig1 & HW_PRTY_ASSERT_SET_1,
		  sig2 & HW_PRTY_ASSERT_SET_2,
		  sig3 & HW_PRTY_ASSERT_SET_3);
	printk(KERN_ERR"%s: Parity errors detected in blocks: ",
	       bp->dev->name);
	par_num = bnx2x_print_blocks_with_parity0(
		sig0 & HW_PRTY_ASSERT_SET_0, 0);
	par_num = bnx2x_print_blocks_with_parity1(
		sig1 & HW_PRTY_ASSERT_SET_1, par_num);
	par_num = bnx2x_print_blocks_with_parity2(
		sig2 & HW_PRTY_ASSERT_SET_2, par_num);
	par_num = bnx2x_print_blocks_with_parity3(
		sig3 & HW_PRTY_ASSERT_SET_3, par_num);
	printk("\n");
	return true;
}

D
Dmitry Kravkov 已提交
2441
bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2442
{
E
Eliezer Tamir 已提交
2443
	struct attn_route attn;
2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
					attn.sig[3]);
}

/*
 * Handle deasserted attention bits: check for parity errors first (and
 * kick off recovery if found), otherwise dispatch each asserted dynamic
 * attention group to the per-group handlers, ack the bits towards the
 * HC and unmask them again in the AEU.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* dispatch each deasserted group, masked by its AEU group config */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the handled attention bits in the AEU mask */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

/* Compare the attention bits/acks in the default status block against
 * the driver's tracked state and service the newly asserted and newly
 * deasserted bits. */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit that matches its ack must also match the tracked state */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
2576
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
E
Eliezer Tamir 已提交
2577 2578 2579 2580
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2581
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
E
Eliezer Tamir 已提交
2582 2583 2584 2585
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
2586 2587
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
E
Eliezer Tamir 已提交
2588

V
Vladislav Zolotarov 已提交
2589
	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
E
Eliezer Tamir 已提交
2590

2591
	/* HW attentions */
V
Vladislav Zolotarov 已提交
2592
	if (status & 0x1) {
E
Eliezer Tamir 已提交
2593
		bnx2x_attn_int(bp);
V
Vladislav Zolotarov 已提交
2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);
E
Eliezer Tamir 已提交
2606

E
Eilon Greenstein 已提交
2607
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
E
Eliezer Tamir 已提交
2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

D
Dmitry Kravkov 已提交
2619
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
E
Eliezer Tamir 已提交
2620 2621 2622 2623 2624 2625
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2626
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
E
Eliezer Tamir 已提交
2627 2628 2629
		return IRQ_HANDLED;
	}

E
Eilon Greenstein 已提交
2630
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
E
Eliezer Tamir 已提交
2631 2632 2633 2634 2635 2636

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647
#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
2648
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
E
Eliezer Tamir 已提交
2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662

	return IRQ_HANDLED;
}

/* end of slow path */

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
E
Eliezer Tamir 已提交
2663
		goto timer_restart;
E
Eliezer Tamir 已提交
2664 2665 2666 2667 2668

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

2669
		bnx2x_tx_int(fp);
E
Eliezer Tamir 已提交
2670 2671 2672
		rc = bnx2x_rx_int(fp, 1000);
	}

2673 2674
	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
E
Eliezer Tamir 已提交
2675 2676 2677 2678 2679 2680 2681
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
2682
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
E
Eliezer Tamir 已提交
2683

2684
		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
E
Eliezer Tamir 已提交
2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

2697
	if (bp->state == BNX2X_STATE_OPEN)
Y
Yitchak Gertner 已提交
2698
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
E
Eliezer Tamir 已提交
2699

E
Eliezer Tamir 已提交
2700
timer_restart:
E
Eliezer Tamir 已提交
2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

2712
/* Zero the per-queue (USTORM and CSTORM sub-block) status block image that
 * lives in CSTORM fast memory for the given status-block id.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

D
Dmitry Kravkov 已提交
2725
/* Set up a fastpath status block: publish its DMA address to the storm
 * firmware, tag it with the owning function, disable host coalescing on
 * every index (re-enabled later by bnx2x_update_coalesce()), and ACK it
 * with interrupts enabled.
 *
 * @sb:      host status block to initialize (id written into it)
 * @mapping: DMA address of @sb as seen by the chip
 * @sb_id:   hardware status block id
 */
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* low/high halves of the 64-bit section address */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all USTORM indices */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all CSTORM indices */
	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	/* ACK the block and enable its interrupt */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

/* Zero this function's default status block images in TSTORM, CSTORM
 * (U and C sub-blocks) and XSTORM fast memory.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

/* Set up the default (slowpath) status block: cache the attention group
 * signal masks, publish the attention and per-storm section addresses to
 * the HC and storm firmware, disable host coalescing on every default-SB
 * index, clear pending slowpath flags and ACK the block.
 *
 * @def_sb:  host default status block (ids written into it)
 * @mapping: DMA address of @def_sb as seen by the chip
 * @sb_id:   hardware status block id (DEF_SB_ID)
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* snapshot the AEU enable masks for each dynamic attention group;
	 * each group occupies four consecutive 32-bit registers
	 */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* tell the HC where the attention section lives */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* host coalescing disabled on all USTORM default-SB indices */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* no slowpath completions outstanding yet */
	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

D
Dmitry Kravkov 已提交
2904
/* Program the host-coalescing timeouts for every queue from bp->rx_ticks /
 * bp->tx_ticks (units of 4*BNX2X_BTR), and enable/disable coalescing on the
 * RX-CQ and TX-CQ indices accordingly (a zero timeout disables it).
 */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

/* Initialize the slowpath (SPQ) ring state and publish the ring's DMA base
 * address and initial producer index to the XSTORM firmware.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* ring starts empty with the producer at the first BD */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

/* Fill in the per-queue ETH connection contexts: the USTORM RX-side
 * context (client id, status block, buffer/page addresses, optional TPA/SGE
 * setup) and the CSTORM/XSTORM TX-side context (status block and TX BD ring
 * base), plus the CDU validation words.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled: describe the SGE ring as well */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* max SGEs per packet, rounded up to a whole number
			 * of SGE pages
			 */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation words for the UCM/XCM aggregation regions */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

/* Program the TSTORM RSS indirection table, spreading entries round-robin
 * over the client ids of all active queues. No-op when RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

D
Dmitry Kravkov 已提交
3048
/* Write the per-client TSTORM configuration (MTU, statistics/E1HOV flags,
 * optional hardware VLAN stripping) for every queue's client id.
 * Note: TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE is the (misspelled)
 * name from the firmware headers.
 */
void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* enable HW VLAN stripping only when a VLAN group is registered */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for (i = 0; i < sizeof(struct tstorm_eth_client_config)/4 * 0 + 1; i++)
		;	/* placeholder loop removed - see per-queue loop below */

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* the config struct is two 32-bit words */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

D
Dmitry Kravkov 已提交
3081
/* Translate bp->rx_mode (none/normal/allmulti/promisc) into the TSTORM MAC
 * filter configuration and the NIG LLH BRB driver mask, write both to the
 * chip, and (unless RX is fully disabled) refresh the per-client config.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* write the filter config word by word into TSTORM internal memory */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

/* Per-port internal memory init: program the host-coalescing base timer
 * resolution (BNX2X_BTR) into the CSTORM (U and C), TSTORM and XSTORM HC
 * BTR registers for this port.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Per-function internal memory init: TSTORM common config (RSS/TPA/E1HOV),
 * initial RX mode, per-client statistics reset, statistics collection
 * flags and FW-stats query address for each storm, E1H function mode,
 * per-queue CQE page addresses and aggregation size, dropless flow-control
 * thresholds (E1H), and the rate-shaping/fairness (cmng) contexts.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* address of the host fw_stats buffer for the stats ramrod */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		/* tell each storm whether we run in multi-function mode */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400
/* Run the internal-memory init stages implied by the MCP load response:
 * COMMON performs common + port + function init, PORT performs port +
 * function, FUNCTION performs function only. Unknown codes are logged
 * and nothing is initialized.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (load_code != FW_MSG_CODE_DRV_LOAD_PORT) &&
	    (load_code != FW_MSG_CODE_DRV_LOAD_FUNCTION)) {
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		return;
	}

	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		bnx2x_init_internal_common(bp);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bnx2x_init_internal_port(bp);

	/* function-level init runs for every valid load code */
	bnx2x_init_internal_func(bp);
}

D
Dmitry Kravkov 已提交
3401
/* Top-level NIC init after firmware load: set up every fastpath (ids and
 * status blocks), the default status block, coalescing, RX/TX/SPQ rings,
 * connection contexts, internal memories, RSS table and statistics; then
 * enable interrupts and check the SPIO5 attention (fan failure) line.
 *
 * @load_code: MCP load response, forwarded to bnx2x_init_internal()
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* sb 0 is reserved for the CNIC client */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
3464 3465
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
E
Eliezer Tamir 已提交
3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484
	if (bp->gunzip_buf  == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm  == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
3485 3486
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
E
Eliezer Tamir 已提交
3487 3488 3489
	bp->gunzip_buf = NULL;

gunzip_nomem1:
V
Vladislav Zolotarov 已提交
3490 3491
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
	       " un-compression\n");
E
Eliezer Tamir 已提交
3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
3503 3504
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
E
Eliezer Tamir 已提交
3505 3506 3507 3508
		bp->gunzip_buf = NULL;
	}
}

3509
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
E
Eliezer Tamir 已提交
3510 3511 3512 3513
{
	int n, rc;

	/* check gzip header */
3514 3515
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
E
Eliezer Tamir 已提交
3516
		return -EINVAL;
3517
	}
E
Eliezer Tamir 已提交
3518 3519 3520

	n = 10;

3521
#define FNAME				0x8
E
Eliezer Tamir 已提交
3522 3523 3524 3525

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

3526
	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
E
Eliezer Tamir 已提交
3527 3528 3529 3530 3531 3532 3533 3534 3535 3536
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
3537 3538
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);
E
Eliezer Tamir 已提交
3539 3540 3541

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
V
Vladislav Zolotarov 已提交
3542 3543 3544
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
				bp->gunzip_outlen);
E
Eliezer Tamir 已提交
3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
3558
 * General service functions
E
Eliezer Tamir 已提交
3559 3560 3561 3562 3563 3564 3565 3566 3567 3568
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
3569
	wb_write[2] = 0x20;		/* SOP */
E
Eliezer Tamir 已提交
3570 3571 3572 3573 3574
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
3575
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
E
Eliezer Tamir 已提交
3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 *
 * Returns 0 on success; negative codes identify the failing step:
 *  -1/-3 NIG byte-counter never reached the expected value,
 *  -2    PRS never saw the first packet,
 *  -4    the NIG EOP FIFO could not be drained.
 * NOTE(review): timeouts are scaled up for FPGA/emulation via 'factor'.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* slow platforms need proportionally longer poll loops */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

/* Unmask HW-attention interrupt sources for the individual blocks by
 * clearing each block's INT_MASK register (0 = all sources enabled).
 * Commented-out SEM/MISC writes are intentionally left disabled.
 * The non-zero PXP2/PBF masks keep specific bits masked (see inline
 * comments); bit meanings come from the register layout — not visible
 * here, so treat the values as HW-defined constants. */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813
/* Per-block parity mask register addresses and the mask value to program
 * into each (0 = report all parity errors for that block; non-zero masks
 * keep the noted bits suppressed).  Consumed by enable_blocks_parity(). */
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

/* Program every entry of bnx2x_parity_mask into its block's parity mask
 * register, enabling parity-error reporting per the table.
 *
 * Idiom fix: use the kernel's ARRAY_SIZE() macro (linux/kernel.h, already
 * included) instead of the hand-rolled sizeof/sizeof expression.
 */
static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

3814

E
Eilon Greenstein 已提交
3815 3816 3817 3818 3819 3820 3821 3822
/* Assert reset on the common HW blocks via the MISC reset registers.
 * The bit patterns select which blocks go into reset; bits left clear
 * keep their blocks running — exact bit meanings are defined by the
 * reset-register layout (not visible here). */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840
/* Configure the PXP2 arbiter from the PCIe Device Control register:
 * the write order comes from the max-payload-size field, the read order
 * from the max-read-request field unless overridden by the 'mrrs'
 * module parameter (bp->mrrs != -1). */
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	int read_order, write_order;
	u16 devctl;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);

	/* max payload size field -> write order */
	write_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	if (bp->mrrs != -1) {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		read_order = bp->mrrs;
	} else {
		/* max read request size field -> read order */
		read_order = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;
	}

	bnx2x_init_pxp_arb(bp, read_order, write_order);
}
E
Eilon Greenstein 已提交
3841 3842 3843

/* Enable fan-failure detection (signalled on SPIO 5) when the shared
 * HW configuration requests it — either explicitly, or implicitly via
 * the external PHY type of either port.  No-op without bootcode or when
 * detection is not required. */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	/* the decision is read from shared memory, which needs the MCP */
	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

3899
/* One-time (chip-wide) COMMON-stage HW initialization: pulls all blocks
 * out of reset, runs every block's COMMON_STAGE init sequence in the
 * HW-mandated order, and runs the internal-memory self test on the
 * first bring-up since power-on (E1 only).
 *
 * Returns 0 on success or -EBUSY when a HW init/poll step does not
 * complete.  The statement order below is load-bearing — do not reorder.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	/* take everything back out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* configure byte swapping for big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	/* zero the QM pointer table and program the base addresses */
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the STORM internal memories */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	/* seed the searcher RSS key registers with random values */
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* these external PHYs need the HW lock for MDIO access */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
E
Eliezer Tamir 已提交
4172

4173 4174 4175
/* Per-port (PORT0/PORT1 stage) HW initialization: runs each block's
 * per-port init sequence, programs BRB pause thresholds from the MTU
 * and multi-function mode, configures PBF for pause-less operation,
 * sets the per-function AEU attention masks and PHY-specific attention
 * groups, and finally resets the link.  Always returns 0; statement
 * order is load-bearing.
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	/* mask NIG interrupts for this port during init */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		/* thresholds (in 256-byte units) depend on MF mode,
		   single/dual port and MTU */
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	/* hook PHY-specific attention sources into the AEU groups */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

4366 4367 4368 4369
#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
4370
#define CNIC_ILT_LINES		0
4371
#endif
4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388

/* Write one ILT (internal lookup table) entry: the DMA address 'addr'
 * is encoded via ONCHIP_ADDR1/2 and written as a wide (64-bit) register
 * at the chip-revision-specific ONCHIP_AT base plus index*8. */
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int base = CHIP_IS_E1H(bp) ? PXP2_REG_RQ_ONCHIP_AT_B0 :
				     PXP2_REG_RQ_ONCHIP_AT;	/* E1 */

	bnx2x_wb_wr(bp, base + index*8,
		    ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
E
Eilon Greenstein 已提交
4389
	u32 addr, val;
4390 4391
	int i;

V
Vladislav Zolotarov 已提交
4392
	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4393

E
Eilon Greenstein 已提交
4394 4395 4396 4397 4398 4399
	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

4400 4401 4402 4403 4404 4405 4406 4407 4408 4409
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449
#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif
4450 4451

	if (CHIP_IS_E1H(bp)) {
4452 4453 4454 4455 4456 4457 4458 4459 4460
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
4473
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4474

E
Eliezer Tamir 已提交
4475
	/* Reset PCIE errors for debug */
E
Eliezer Tamir 已提交
4476 4477 4478
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

4479 4480 4481
	return 0;
}

D
Dmitry Kravkov 已提交
4482
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4483 4484
{
	int i, rc = 0;
E
Eliezer Tamir 已提交
4485

4486 4487
	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);
E
Eliezer Tamir 已提交
4488

4489 4490
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
4491 4492 4493
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;
E
Eliezer Tamir 已提交
4494

4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
E
Eliezer Tamir 已提交
4523 4524

		bp->fw_drv_pulse_wr_seq =
4525
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
E
Eliezer Tamir 已提交
4526
				 DRV_PULSE_SEQ_MASK);
4527 4528
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}
E
Eliezer Tamir 已提交
4529

4530 4531 4532 4533
	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4534 4535 4536
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif
4537 4538 4539 4540 4541

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
E
Eliezer Tamir 已提交
4542 4543
}

D
Dmitry Kravkov 已提交
4544
void bnx2x_free_mem(struct bnx2x *bp)
E
Eliezer Tamir 已提交
4545 4546 4547 4548 4549
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
4550
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
E
Eliezer Tamir 已提交
4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
E
Eilon Greenstein 已提交
4567
	/* Common */
E
Eliezer Tamir 已提交
4568 4569
	for_each_queue(bp, i) {

E
Eilon Greenstein 已提交
4570
		/* status blocks */
E
Eliezer Tamir 已提交
4571 4572
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
E
Eilon Greenstein 已提交
4573
			       sizeof(struct host_status_block));
E
Eilon Greenstein 已提交
4574 4575
	}
	/* Rx */
4576
	for_each_queue(bp, i) {
E
Eliezer Tamir 已提交
4577

E
Eilon Greenstein 已提交
4578
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
E
Eliezer Tamir 已提交
4579 4580 4581 4582 4583 4584 4585 4586 4587 4588
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

4589
		/* SGE ring */
E
Eilon Greenstein 已提交
4590
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4591 4592 4593 4594
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
E
Eilon Greenstein 已提交
4595
	/* Tx */
4596
	for_each_queue(bp, i) {
E
Eilon Greenstein 已提交
4597 4598 4599 4600 4601

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
E
Eilon Greenstein 已提交
4602
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
E
Eilon Greenstein 已提交
4603
	}
E
Eliezer Tamir 已提交
4604 4605 4606
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4607
		       sizeof(struct host_def_status_block));
E
Eliezer Tamir 已提交
4608 4609

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4610
		       sizeof(struct bnx2x_slowpath));
E
Eliezer Tamir 已提交
4611

4612
#ifdef BCM_CNIC
E
Eliezer Tamir 已提交
4613 4614 4615 4616
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4617 4618
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
E
Eliezer Tamir 已提交
4619
#endif
4620
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
E
Eliezer Tamir 已提交
4621 4622 4623 4624 4625

#undef BNX2X_PCI_FREE
#undef BNX2X_KFREE
}

D
Dmitry Kravkov 已提交
4626
int bnx2x_alloc_mem(struct bnx2x *bp)
E
Eliezer Tamir 已提交
4627 4628 4629 4630
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
4631
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
D
Dmitry Kravkov 已提交
4632 4633 4634 4635
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
E
Eliezer Tamir 已提交
4636

D
Dmitry Kravkov 已提交
4637 4638 4639 4640 4641 4642 4643
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
E
Eliezer Tamir 已提交
4644

D
Dmitry Kravkov 已提交
4645
	int i;
E
Eliezer Tamir 已提交
4646

D
Dmitry Kravkov 已提交
4647 4648
	/* fastpath */
	/* Common */
E
Eliezer Tamir 已提交
4649
	for_each_queue(bp, i) {
D
Dmitry Kravkov 已提交
4650
		bnx2x_fp(bp, i, bp) = bp;
E
Eliezer Tamir 已提交
4651

D
Dmitry Kravkov 已提交
4652 4653 4654 4655
		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
E
Eliezer Tamir 已提交
4656
	}
D
Dmitry Kravkov 已提交
4657 4658
	/* Rx */
	for_each_queue(bp, i) {
E
Eliezer Tamir 已提交
4659

D
Dmitry Kravkov 已提交
4660 4661 4662 4663 4664 4665
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);
E
Eilon Greenstein 已提交
4666

D
Dmitry Kravkov 已提交
4667 4668 4669 4670
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);
E
Eliezer Tamir 已提交
4671

D
Dmitry Kravkov 已提交
4672 4673 4674 4675 4676 4677 4678 4679 4680
		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {
E
Eilon Greenstein 已提交
4681

D
Dmitry Kravkov 已提交
4682 4683 4684 4685 4686 4687
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
E
Eilon Greenstein 已提交
4688
	}
D
Dmitry Kravkov 已提交
4689
	/* end of fastpath */
E
Eilon Greenstein 已提交
4690

D
Dmitry Kravkov 已提交
4691 4692
	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));
E
Eilon Greenstein 已提交
4693

D
Dmitry Kravkov 已提交
4694 4695
	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));
E
Eliezer Tamir 已提交
4696

D
Dmitry Kravkov 已提交
4697 4698
#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
E
Eilon Greenstein 已提交
4699

D
Dmitry Kravkov 已提交
4700 4701 4702 4703
	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
E
Eliezer Tamir 已提交
4704

D
Dmitry Kravkov 已提交
4705 4706 4707
	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
E
Eliezer Tamir 已提交
4708

D
Dmitry Kravkov 已提交
4709 4710
	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4711

D
Dmitry Kravkov 已提交
4712 4713
	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4714

D
Dmitry Kravkov 已提交
4715 4716 4717
	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif
4718

D
Dmitry Kravkov 已提交
4719 4720
	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4721

D
Dmitry Kravkov 已提交
4722
	return 0;
E
Eilon Greenstein 已提交
4723

D
Dmitry Kravkov 已提交
4724 4725 4726
alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;
E
Eilon Greenstein 已提交
4727

D
Dmitry Kravkov 已提交
4728 4729
#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
4730 4731 4732
}


E
Eliezer Tamir 已提交
4733 4734 4735 4736
/*
 * Init service functions
 */

4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC: the CAM stores each 16-bit word byte-swapped */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
							cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
4820 4821 4822 4823
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

E
Eilon Greenstein 已提交
4824
	config->hdr.length = 1;
4825 4826
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
4827 4828 4829 4830
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
4831
					swab16(*(u16 *)&mac[0]);
4832
	config->config_table[0].middle_mac_addr =
4833
					swab16(*(u16 *)&mac[2]);
4834
	config->config_table[0].lsb_mac_addr =
4835
					swab16(*(u16 *)&mac[4]);
E
Eilon Greenstein 已提交
4836
	config->config_table[0].clients_bit_vector =
4837
					cpu_to_le32(cl_bit_vec);
4838 4839
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4840 4841 4842 4843 4844
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4845

4846
	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4847
	   (set ? "setting" : "clearing"),
4848 4849
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
4850
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4851 4852 4853 4854 4855 4856

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

E
Eliezer Tamir 已提交
4857 4858 4859 4860
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
E
Eilon Greenstein 已提交
4861
	int cnt = 5000;
E
Eliezer Tamir 已提交
4862

E
Eliezer Tamir 已提交
4863 4864
	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);
E
Eliezer Tamir 已提交
4865 4866

	might_sleep();
4867
	while (cnt--) {
E
Eliezer Tamir 已提交
4868 4869
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
4870 4871
			/* if index is different from 0
			 * the reply for some commands will
4872
			 * be on the non default queue
E
Eliezer Tamir 已提交
4873 4874 4875 4876 4877
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

4878
		mb(); /* state is changed by bnx2x_sp_event() */
E
Eilon Greenstein 已提交
4879 4880 4881 4882
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
E
Eliezer Tamir 已提交
4883
			return 0;
E
Eilon Greenstein 已提交
4884
		}
E
Eliezer Tamir 已提交
4885 4886

		msleep(1);
4887 4888 4889

		if (bp->panic)
			return -EIO;
E
Eliezer Tamir 已提交
4890 4891 4892
	}

	/* timeout! */
4893 4894
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
4895 4896 4897
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif
E
Eliezer Tamir 已提交
4898

4899
	return -EBUSY;
E
Eliezer Tamir 已提交
4900 4901
}

D
Dmitry Kravkov 已提交
4902
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

D
Dmitry Kravkov 已提交
4914
void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
				  1);
	else
		/* CAM allocation for E1H
		* unicasts: by func number
		* multicast: 20+FUNC*20, 20 each
		*/
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

D
Dmitry Kravkov 已提交
4965
int bnx2x_setup_leading(struct bnx2x *bp)
E
Eliezer Tamir 已提交
4966
{
4967
	int rc;
E
Eliezer Tamir 已提交
4968

E
Eliezer Tamir 已提交
4969
	/* reset IGU state */
4970
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
E
Eliezer Tamir 已提交
4971 4972 4973 4974

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

4975 4976
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
E
Eliezer Tamir 已提交
4977

4978
	return rc;
E
Eliezer Tamir 已提交
4979 4980
}

D
Dmitry Kravkov 已提交
4981
int bnx2x_setup_multi(struct bnx2x *bp, int index)
E
Eliezer Tamir 已提交
4982
{
E
Eilon Greenstein 已提交
4983 4984
	struct bnx2x_fastpath *fp = &bp->fp[index];

E
Eliezer Tamir 已提交
4985
	/* reset IGU state */
E
Eilon Greenstein 已提交
4986
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
E
Eliezer Tamir 已提交
4987

4988
	/* SETUP ramrod */
E
Eilon Greenstein 已提交
4989 4990 4991
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);
E
Eliezer Tamir 已提交
4992 4993 4994

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
E
Eilon Greenstein 已提交
4995
				 &(fp->state), 0);
E
Eliezer Tamir 已提交
4996 4997 4998
}


D
Dmitry Kravkov 已提交
4999
void bnx2x_set_num_queues_msix(struct bnx2x *bp)
E
Eilon Greenstein 已提交
5000 5001 5002 5003
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
5004
		bp->num_queues = 1;
E
Eilon Greenstein 已提交
5005 5006 5007
		break;

	case ETH_RSS_MODE_REGULAR:
5008 5009 5010
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
						  BNX2X_MAX_QUEUES(bp));
E
Eilon Greenstein 已提交
5011
		else
5012 5013
			bp->num_queues = min_t(u32, num_online_cpus(),
						  BNX2X_MAX_QUEUES(bp));
E
Eilon Greenstein 已提交
5014 5015 5016 5017
		break;


	default:
5018
		bp->num_queues = 1;
D
Dmitry Kravkov 已提交
5019 5020
		break;
	}
E
Eliezer Tamir 已提交
5021 5022
}

D
Dmitry Kravkov 已提交
5023 5024


E
Eliezer Tamir 已提交
5025 5026
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
E
Eilon Greenstein 已提交
5027
	struct bnx2x_fastpath *fp = &bp->fp[index];
E
Eliezer Tamir 已提交
5028 5029
	int rc;

E
Eliezer Tamir 已提交
5030
	/* halt the connection */
E
Eilon Greenstein 已提交
5031 5032
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
E
Eliezer Tamir 已提交
5033

5034
	/* Wait for completion */
E
Eliezer Tamir 已提交
5035
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
E
Eilon Greenstein 已提交
5036
			       &(fp->state), 1);
E
Eliezer Tamir 已提交
5037
	if (rc) /* timeout */
E
Eliezer Tamir 已提交
5038 5039 5040 5041 5042
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

5043 5044
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
E
Eilon Greenstein 已提交
5045
			       &(fp->state), 1);
5046
	return rc;
E
Eliezer Tamir 已提交
5047 5048
}

5049
static int bnx2x_stop_leading(struct bnx2x *bp)
E
Eliezer Tamir 已提交
5050
{
5051
	__le16 dsb_sp_prod_idx;
E
Eliezer Tamir 已提交
5052
	/* if the other port is handling traffic,
E
Eliezer Tamir 已提交
5053
	   this can take a lot of time */
5054 5055
	int cnt = 500;
	int rc;
E
Eliezer Tamir 已提交
5056 5057 5058 5059 5060

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5061
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
E
Eliezer Tamir 已提交
5062

5063 5064 5065 5066
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
5067
		return rc;
E
Eliezer Tamir 已提交
5068

5069
	dsb_sp_prod_idx = *bp->dsb_sp_prod;
E
Eliezer Tamir 已提交
5070

5071
	/* Send PORT_DELETE ramrod */
E
Eliezer Tamir 已提交
5072 5073
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

5074
	/* Wait for completion to arrive on default status block
E
Eliezer Tamir 已提交
5075 5076 5077
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
5078 5079 5080 5081 5082 5083 5084 5085
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
5086
			rc = -EBUSY;
5087 5088 5089
			break;
		}
		cnt--;
5090
		msleep(1);
5091
		rmb(); /* Refresh the dsb_sp_prod */
5092 5093 5094
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5095 5096

	return rc;
E
Eliezer Tamir 已提交
5097 5098
}

5099 5100 5101 5102 5103 5104 5105 5106 5107 5108
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

/* Per-port reset: mask NIG interrupts, stop packet reception into the BRB,
 * mask the AEU attentions, and warn if the BRB still holds blocks for this
 * port after a 100ms grace period.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

/* Reset the chip at the scope dictated by the MCP unload response:
 * COMMON resets port + function + common blocks, PORT resets port +
 * function, FUNCTION resets only this function.
 */
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

D
Dmitry Kravkov 已提交
5181
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
E
Eliezer Tamir 已提交
5182
{
5183
	int port = BP_PORT(bp);
E
Eliezer Tamir 已提交
5184
	u32 reset_code = 0;
5185
	int i, cnt, rc;
E
Eliezer Tamir 已提交
5186

E
Eilon Greenstein 已提交
5187
	/* Wait until tx fastpath tasks complete */
5188
	for_each_queue(bp, i) {
5189 5190
		struct bnx2x_fastpath *fp = &bp->fp[i];

5191
		cnt = 1000;
5192
		while (bnx2x_has_tx_work_unload(fp)) {
5193

5194
			bnx2x_tx_int(fp);
5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
5206
			msleep(1);
5207
		}
5208
	}
5209 5210
	/* Give HW time to discard old tx messages */
	msleep(1);
E
Eliezer Tamir 已提交
5211

5212 5213 5214 5215
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

5216
		bnx2x_set_eth_mac_addr_e1(bp, 0);
5217

E
Eilon Greenstein 已提交
5218
		for (i = 0; i < config->hdr.length; i++)
5219 5220
			CAM_INVALIDATE(config->config_table[i]);

E
Eilon Greenstein 已提交
5221
		config->hdr.length = i;
5222 5223 5224 5225
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5226
		config->hdr.client_id = bp->fp->cl_id;
5227 5228
		config->hdr.reserved1 = 0;

5229 5230 5231
		bp->set_mac_pending++;
		smp_wmb();

5232 5233 5234 5235 5236
		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
5237 5238
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

5239
		bnx2x_set_eth_mac_addr_e1h(bp, 0);
5240 5241 5242

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5243 5244

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5245
	}
5246 5247 5248 5249 5250 5251 5252 5253 5254
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif
5255

5256 5257 5258
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

5259
	else if (bp->flags & NO_WOL_FLAG)
5260 5261
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

5262
	else if (bp->wol) {
5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5281

5282 5283
	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
E
Eliezer Tamir 已提交
5284 5285
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
5286
			goto unload_error;
E
Eliezer Tamir 已提交
5287

5288 5289
	rc = bnx2x_stop_leading(bp);
	if (rc) {
5290
		BNX2X_ERR("Stop leading failed!\n");
5291
#ifdef BNX2X_STOP_ON_ERROR
5292
		return -EBUSY;
5293 5294
#else
		goto unload_error;
5295
#endif
5296 5297 5298
	}

unload_error:
5299
	if (!BP_NOMCP(bp))
5300
		reset_code = bnx2x_fw_command(bp, reset_code);
5301
	else {
E
Eilon Greenstein 已提交
5302
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5303 5304
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
5305
		load_count[1 + port]--;
E
Eilon Greenstein 已提交
5306
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5307 5308 5309
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5310
		else if (load_count[1 + port] == 0)
5311 5312 5313 5314
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}
E
Eliezer Tamir 已提交
5315

5316 5317 5318
	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);
E
Eliezer Tamir 已提交
5319 5320

	/* Reset the chip */
5321
	bnx2x_reset_chip(bp, reset_code);
E
Eliezer Tamir 已提交
5322 5323

	/* Report UNLOAD_DONE to MCP */
5324
	if (!BP_NOMCP(bp))
E
Eliezer Tamir 已提交
5325
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
E
Eilon Greenstein 已提交
5326

5327 5328
}

D
Dmitry Kravkov 已提交
5329
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}


/* Close gates #2, #3 and #4: */
/* Close or open HW gates #2, #3 and #4 (host doorbells, internal writes
 * and HC config).  Gates #2 and #4a are touched on non-E1 chips only.
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 regval;
	u32 hc_addr;

	if (!CHIP_IS_E1(bp)) {
		/* Gate #4: host doorbell discard */
		regval = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (regval | 0x1) : (regval & (~(u32)1)));
		/* Gate #2: host internal-write discard */
		regval = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (regval | 0x1) : (regval & (~(u32)1)));
	}

	/* Gate #3: HC config; note the enable bit has the opposite sense */
	hc_addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	regval = REG_RD(bp, hc_addr);
	REG_WR(bp, hc_addr, (!close) ? (regval | 0x1) : (regval & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
		close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

/* Save the current state of the CLP `magic' bit into *magic_val and
 * force the bit on so the MF configuration survives the MCP reset.
 */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 clp_mb = MF_CFG_RD(bp, shared_mf_config.clp_mb);

	/* Remember the old bit value, then set it */
	*magic_val = clp_mb & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, clp_mb | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp        driver handle
 * @param magic_val old value of the `magic' bit, as previously saved by
 *                  bnx2x_clp_reset_prep()
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* Prepare for an MCP reset: save the CLP `magic' bit (non-E1 only) and
 * clear the shmem validity map so MCP state is re-established afterwards.
 *
 * @param bp        driver handle
 * @param magic_val out: old value of the `magic' bit
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem_base;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Clear validity map flags, if shmem is mapped at all */
	shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem_base > 0)
		REG_WR(bp, shmem_base +
		       offsetof(struct shmem_region, validity_map[0]), 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Sleep for one MCP polling interval.  Emulation and FPGA platforms
 * are given ten times longer.
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	unsigned int interval = MCP_ONE_TIMEOUT;

	/* special handling for emulation and FPGA: wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		interval *= 10;

	msleep(interval);
}

/* Completion side of an MCP reset: poll the shmem validity map until the
 * MCP signals it is back up, then restore the CLP `magic' bit that
 * bnx2x_reset_mcp_prep() saved.
 *
 * @param bp        driver handle
 * @param magic_val old `magic' bit value to restore
 *
 * Returns 0 when the MCP came up within MCP_TIMEOUT, -ENOTTY otherwise
 * (shmem not mapped or the validity signature never appeared).
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Give the MCP a head start before polling */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}

/* Return the PXP2 read path to its pre-init state (no-op on E1). */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		return;

	REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
	REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
	REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
	mmiowb();
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* Blocks in RESET_REG_1 that must be left untouched */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* Blocks in RESET_REG_2 that must be left untouched */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* RESET_REG_2 is narrower on E1 */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* Write the masks (minus the preserved blocks) to the CLEAR side */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	/* ...then write the full masks to the SET side */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}

/* Perform a global "process kill" recovery: wait for the PXP Tetris
 * buffer to drain, close HW gates #2/#3/#4, reset the chip (except the
 * always-on blocks, see bnx2x_process_kill_chip_reset()) and bring the
 * MCP back up.
 *
 * Returns 0 on success, -EAGAIN if the chip did not quiesce within 1s
 * or the MCP failed to come back.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * Recovery state machine: INIT (unload, maybe acquire leadership) ->
 * WAIT (leader waits for all functions to unload and then resets the
 * chip; non-leaders wait for the reset to finish or take over
 * leadership if it becomes available).
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 *  update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
5770 5771
static void bnx2x_reset_task(struct work_struct *work)
{
5772
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5773 5774 5775 5776

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
5777
	 KERN_ERR " you will need to reboot when done\n");
5778 5779 5780 5781 5782 5783 5784 5785
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

5786 5787 5788 5789 5790 5791
	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}
5792 5793 5794 5795 5796

reset_task_exit:
	rtnl_unlock();
}

E
Eliezer Tamir 已提交
5797 5798 5799 5800 5801 5802
/* end of nic load/unload */

/*
 * Init service functions
 */

5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

/* Disable interrupts on an E1H chip.  The disable must be done while
 * pretending to be function 0; the original function mapping is
 * restored afterwards.  BUG()s if the pretend register does not read
 * back the value just written.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 pretend_reg = bnx2x_get_pretend_reg(bp, orig_func);
	u32 readback;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, pretend_reg, 0);
	/* Flush the GRC transaction (in the chip) */
	readback = REG_RD(bp, pretend_reg);
	if (readback != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  readback);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, pretend_reg, orig_func);
	readback = REG_RD(bp, pretend_reg);
	if (readback != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, readback);
		BUG();
	}
}

/* Disable chip interrupts; E1H needs the "pretend" dance, E1 does not. */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}

5861 5862 5863 5864 5865 5866 5867 5868 5869 5870
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
Y
Yitchak Gertner 已提交
5871
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5872 5873 5874
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5875
			/* save our func */
5876
			int func = BP_FUNC(bp);
5877 5878
			u32 swap_en;
			u32 swap_val;
5879

5880 5881 5882
			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

5883 5884 5885 5886
			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
5887 5888 5889
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
5890 5891 5892 5893 5894
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

5895 5896 5897 5898
				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
5899
				bp->func = 1;
5900 5901 5902 5903 5904 5905
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
5906 5907
			}

5908 5909 5910
			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

5911
			bnx2x_undi_int_disable(bp, func);
5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5932 5933 5934
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5935
			       0xd3ffffff);
5936 5937 5938
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
5954 5955 5956

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5957 5958 5959 5960 5961 5962
	}
}

/* Read the chip-common hardware information: chip id, single/dual port,
 * flash size, shmem bases, MCP presence/validity, bootcode version,
 * LED mode, feature flags, WoL capability and the board part number.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port devices from the bond id / strap register */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* No valid shmem -> no MCP; skip everything that needs it */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

/* Build bp->port.supported from the NVRAM switch configuration and the
 * external PHY type, read the PHY address from the NIG, then mask the
 * supported modes by the NVRAM speed capability mask.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

6296
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
E
Eliezer Tamir 已提交
6297
{
Y
Yaniv Rosner 已提交
6298
	bp->link_params.req_duplex = DUPLEX_FULL;
E
Eliezer Tamir 已提交
6299

6300
	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
E
Eliezer Tamir 已提交
6301
	case PORT_FEATURE_LINK_SPEED_AUTO:
6302
		if (bp->port.supported & SUPPORTED_Autoneg) {
Y
Yaniv Rosner 已提交
6303
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6304
			bp->port.advertising = bp->port.supported;
E
Eliezer Tamir 已提交
6305
		} else {
Y
Yaniv Rosner 已提交
6306 6307 6308 6309 6310 6311 6312
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
E
Eliezer Tamir 已提交
6313
				/* force 10G, no AN */
Y
Yaniv Rosner 已提交
6314
				bp->link_params.req_line_speed = SPEED_10000;
6315
				bp->port.advertising =
E
Eliezer Tamir 已提交
6316 6317 6318 6319 6320 6321 6322
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
6323
				  bp->port.link_config);
E
Eliezer Tamir 已提交
6324 6325 6326 6327 6328
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
6329
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
Y
Yaniv Rosner 已提交
6330
			bp->link_params.req_line_speed = SPEED_10;
6331 6332
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
E
Eliezer Tamir 已提交
6333
		} else {
V
Vladislav Zolotarov 已提交
6334 6335 6336 6337 6338
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6339 6340 6341 6342 6343
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
6344
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
Y
Yaniv Rosner 已提交
6345 6346
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
6347 6348
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
E
Eliezer Tamir 已提交
6349
		} else {
V
Vladislav Zolotarov 已提交
6350 6351 6352 6353 6354
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6355 6356 6357 6358 6359
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
6360
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
Y
Yaniv Rosner 已提交
6361
			bp->link_params.req_line_speed = SPEED_100;
6362 6363
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
E
Eliezer Tamir 已提交
6364
		} else {
V
Vladislav Zolotarov 已提交
6365 6366 6367 6368 6369
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6370 6371 6372 6373 6374
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
6375
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
Y
Yaniv Rosner 已提交
6376 6377
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
6378 6379
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
E
Eliezer Tamir 已提交
6380
		} else {
V
Vladislav Zolotarov 已提交
6381 6382 6383 6384 6385
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6386 6387 6388 6389 6390
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
6391
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Y
Yaniv Rosner 已提交
6392
			bp->link_params.req_line_speed = SPEED_1000;
6393 6394
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
E
Eliezer Tamir 已提交
6395
		} else {
V
Vladislav Zolotarov 已提交
6396 6397 6398 6399 6400
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6401 6402 6403 6404 6405
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
6406
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Y
Yaniv Rosner 已提交
6407
			bp->link_params.req_line_speed = SPEED_2500;
6408 6409
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
E
Eliezer Tamir 已提交
6410
		} else {
V
Vladislav Zolotarov 已提交
6411 6412 6413 6414 6415
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6416 6417 6418 6419 6420 6421 6422
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
6423
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Y
Yaniv Rosner 已提交
6424
			bp->link_params.req_line_speed = SPEED_10000;
6425 6426
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
E
Eliezer Tamir 已提交
6427
		} else {
V
Vladislav Zolotarov 已提交
6428 6429 6430 6431 6432
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
E
Eliezer Tamir 已提交
6433 6434 6435 6436 6437
			return;
		}
		break;

	default:
V
Vladislav Zolotarov 已提交
6438 6439 6440
		BNX2X_ERROR("NVRAM config error. "
			    "BAD link speed link_config 0x%x\n",
			    bp->port.link_config);
Y
Yaniv Rosner 已提交
6441
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6442
		bp->port.advertising = bp->port.supported;
E
Eliezer Tamir 已提交
6443 6444 6445
		break;
	}

6446 6447
	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
6448
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
R
Randy Dunlap 已提交
6449
	    !(bp->port.supported & SUPPORTED_Autoneg))
6450
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
E
Eliezer Tamir 已提交
6451

Y
Yaniv Rosner 已提交
6452
	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
E
Eliezer Tamir 已提交
6453
		       "  advertising 0x%x\n",
Y
Yaniv Rosner 已提交
6454 6455
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
6456
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
E
Eliezer Tamir 已提交
6457 6458
}

6459 6460 6461 6462 6463 6464 6465 6466
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

/* Read the per-port hardware configuration (lane/PHY/speed/MAC settings)
 * from the shared-memory NVRAM image into bp->link_params and bp->port,
 * then derive the supported/requested link settings and the MDIO PHY
 * address used by the mdio45 interface.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current: treat it as a plain
	 * BCM8727 but remember the no-over-current variant in the
	 * feature flags.
	 */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx: each shmem word packs
	 * two 16-bit per-lane values (high halfword = even lane).
	 */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* Primary MAC address comes from the port hw config in shmem */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* Separate MAC used by the iSCSI offload (CNIC) path */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

/* Gather chip-, function- and port-level hardware configuration.
 *
 * Detects E1H multi-function (MF) mode and the function's outer-VLAN tag
 * (e1hov), reads the port info and firmware mailbox sequence, and sets
 * the device MAC address (per-function MAC in MF mode, random MAC on
 * MCP-less emulation/FPGA setups).
 *
 * Returns 0 on success, -EPERM on an invalid E1H MF configuration.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* MF mode is indicated by func 0 having a valid e1hov tag */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* in MF mode this function must have its own tag */
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode only uses VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* resume the driver<->MCP mailbox sequence where the
		 * firmware left it
		 */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* in MF mode the per-function MAC overrides the port MAC */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

/* Read the PCI VPD (Vital Product Data) and, for Dell-branded boards,
 * extract the vendor-specific firmware version string (V0 keyword in the
 * read-only VPD section) into bp->fw_ver. On any parse failure bp->fw_ver
 * is simply left zeroed - this is best-effort, not an error path.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* pci_read_vpd() returns the number of bytes read (or <0) */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* locate the read-only large-resource data block */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	/* the MFR_ID keyword identifies the board vendor */
	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info: match Dell's vendor ID in either case */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound the copy to the fw_ver buffer and to the
			 * VPD data actually read
			 */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

/* One-time driver-state initialization for a newly probed device:
 * locks/work items, hardware info discovery, module-parameter-driven
 * feature flags (multi-queue, TPA, flow control), ring sizes, coalescing
 * defaults and the periodic timer.
 *
 * Returns the status of bnx2x_get_hwinfo() (0 or a negative errno).
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode - RSS requires MSI-X interrupts */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags (hardware LRO) from the module parameter */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* slow (emulation/FPGA) chips get a longer timer period; the
	 * "poll" module parameter overrides either value
	 */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}


6789 6790 6791
/****************************************************************************
* General service functions
****************************************************************************/
E
Eliezer Tamir 已提交
6792

Y
Yitchak Gertner 已提交
6793
/* called with rtnl_lock */
E
Eliezer Tamir 已提交
6794 6795 6796 6797
/* ndo_open - called with rtnl_lock held.
 *
 * Powers the device up and loads the NIC. If a parity-error recovery is
 * still pending (reset not marked done), the first function to load tries
 * to complete the "process kill" recovery itself; if that is not possible
 * the open fails with -EAGAIN so the user can retry later.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load while reset
			 * is still not marked done, attempt the recovery
			 * ("process kill") here. The attention state is not
			 * checked because it may have already been cleared
			 * by a "common" reset, but recovery must proceed
			 * anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery not possible from here - power back down
			 * and ask the user to retry
			 */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

Y
Yitchak Gertner 已提交
6839
/* called with rtnl_lock */
E
Eliezer Tamir 已提交
6840 6841 6842 6843 6844
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
Y
Yitchak Gertner 已提交
6845
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6846
	bnx2x_set_power_state(bp, PCI_D3hot);
E
Eliezer Tamir 已提交
6847 6848 6849 6850

	return 0;
}

E
Eilon Greenstein 已提交
6851
/* called with netif_tx_lock from dev_mcast.c */
/* ndo_set_multicast_list handler.
 *
 * Derives the RX filtering mode (normal / all-multi / promisc) from the
 * netdev flags and programs the multicast filters: on E1 chips via CAM
 * entries submitted with a SET_MAC ramrod, on E1H chips via an 8-bit
 * CRC32C hash written into the MC_HASH registers.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 has a limited CAM; too many groups forces all-multi */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* fill one CAM entry per multicast address; the
			 * MAC is stored as three byte-swapped 16-bit words
			 */
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			/* invalidate entries left over from the previous,
			 * longer multicast list
			 */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* make the table visible before posting the ramrod */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				/* top 8 CRC bits select one of 256 filter
				 * bits spread over MC_HASH_SIZE registers
				 */
				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

E
Eliezer Tamir 已提交
6972

Y
Yaniv Rosner 已提交
6973
/* called with rtnl_lock */
/* mdio45 read callback (bp->mdio.mdio_read).
 * Performs a clause-45 read through the PHY lock and returns the 16-bit
 * register value on success or a negative errno (-EINVAL on a PHY
 * address mismatch, or the bnx2x_cl45_read() error).
 */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* only the PHY address discovered at probe time is accessible */
	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	/* on success return the register value itself */
	if (!rc)
		rc = value;
	return rc;
}
E
Eliezer Tamir 已提交
7004

E
Eilon Greenstein 已提交
7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019
/* called with rtnl_lock */
/* mdio45 write callback (bp->mdio.mdio_write).
 * Clause-45 write under the PHY lock; returns 0 on success, -EINVAL on
 * a PHY address mismatch, or the bnx2x_cl45_write() error.
 */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
			   " value 0x%x\n", prtad, devad, addr, value);

	/* only the PHY address discovered at probe time is accessible */
	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
Y
Yaniv Rosner 已提交
7031

E
Eilon Greenstein 已提交
7032 7033 7034 7035 7036
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);
E
Eliezer Tamir 已提交
7037

E
Eilon Greenstein 已提交
7038 7039
	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);
E
Eliezer Tamir 已提交
7040

E
Eilon Greenstein 已提交
7041 7042 7043 7044
	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
E
Eliezer Tamir 已提交
7045 7046
}

A
Alexey Dobriyan 已提交
7047
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler synchronously with the
 * IRQ line masked (used by netconsole/KGDB-over-ethernet).
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2x_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

7058 7059 7060 7061
/* netdev callback table wired into every bnx2x net_device */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

7076 7077
/* PCI-level bring-up of a probed device: enables the PCI function,
 * validates and maps BAR0 (registers) and BAR2 (doorbells), sets the DMA
 * mask, and populates the net_device feature flags, ops and MDIO hooks.
 * Resources are unwound in reverse order on the goto error paths.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 must be a memory BAR (register space) */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 must be a memory BAR (doorbell space) */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first function to enable the device claims the regions */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA; fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* mirror the offload features for VLAN devices on top of us */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

7246 7247
/* Read the negotiated PCI-E link parameters from the PCICFG shadow of
 * the link control register.
 * @width: out - number of active lanes (x1/x2/.../x16)
 * @speed: out - 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2)
 */
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 link_ctrl = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (link_ctrl & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (link_ctrl & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
7256

7257 7258
/* Validate the firmware image held in bp->firmware before it is used.
 *
 * The image comes from userspace via request_firmware() and is untrusted:
 * verify that every section descriptor, every init_ops offset and the
 * embedded FW version lie within bounds.
 *
 * Returns 0 on success, -EINVAL for any malformed field.
 */
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data.  Check offset and len separately
	 * so that a large (attacker-controlled) u32 pair cannot wrap
	 * around in "offset + len" and defeat the bound check. */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset > firmware->size ||
		    len > firmware->size - offset) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

7318
/* Convert an array of big-endian 32-bit words to host byte order.
 * @n is a byte count; any trailing bytes beyond the last full u32 are
 * ignored.
 */
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *src = (const __be32 *)_source;
	u32 *dst = (u32 *)_target;
	u32 idx, words = n / 4;

	for (idx = 0; idx < words; idx++)
		dst[idx] = be32_to_cpu(src[idx]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
7332
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7333
{
7334 7335
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
7336 7337
	u32 i, j, tmp;

7338
	for (i = 0, j = 0; i < n/8; i++, j += 2) {
7339 7340
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
V
Vladislav Zolotarov 已提交
7341 7342
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
7343 7344
	}
}
7345 7346

/* Convert an array of big-endian 16-bit words to host byte order.
 * @n is a byte count; a trailing odd byte is ignored.
 */
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *src = (const __be16 *)_source;
	u16 *dst = (u16 *)_target;
	u32 idx, words = n / 2;

	for (idx = 0; idx < words; idx++)
		dst[idx] = be16_to_cpu(src[idx]);
}

7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366
/* Allocate bp->arr (size taken from the firmware header) and fill it by
 * converting the matching firmware section with @func.
 * Jumps to @lbl on allocation failure; the caller's label chain is
 * responsible for unwinding and for the returned error code.
 * len is u32, so it is printed with %u (was %d: format mismatch). */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %u bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
7367 7368 7369

/* Load and validate the chip firmware, then set up the init-array
 * pointers (blob, opcodes, offsets and per-STORM data) consumed by the
 * chip initialization code.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released again.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* pick the firmware image matching the chip revision */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* BNX2X_ALLOC_AND_SET() jumps to the labels below on allocation
	 * failure without touching rc.  rc is 0 here (check_firmware
	 * succeeded), so without this the function would report success
	 * while leaving the init arrays NULL. */
	rc = -ENOMEM;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: the interrupt tables and program RAM stay in
	 * the firmware blob itself, only pointers are recorded here */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

	/* unwind in reverse allocation order */
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}


E
Eliezer Tamir 已提交
7441 7442 7443 7444 7445
/* PCI probe callback: allocate the net_device, map the device, read the
 * HW configuration, load the firmware and register with the network
 * stack.  On any failure after bnx2x_init_dev() all resources are torn
 * down through the init_one_exit path.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

/* PCI remove callback: mirror of bnx2x_init_one().  Unregisters the
 * netdev first so no new traffic can arrive, then releases firmware
 * arrays, mappings and PCI resources. */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* release regions only on the last disable (other function may
	 * still hold the device) */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

Y
Yitchak Gertner 已提交
7548 7549 7550 7551 7552 7553 7554 7555 7556
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
7557
	netif_carrier_off(bp->dev);
Y
Yitchak Gertner 已提交
7558 7559 7560 7561 7562 7563

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
7564
	bnx2x_free_irq(bp, false);
Y
Yitchak Gertner 已提交
7565 7566 7567 7568 7569

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

E
Eilon Greenstein 已提交
7570
		for (i = 0; i < config->hdr.length; i++)
Y
Yitchak Gertner 已提交
7571 7572 7573 7574 7575
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
7576
	for_each_queue(bp, i)
Y
Yitchak Gertner 已提交
7577
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7578
	for_each_queue(bp, i)
E
Eilon Greenstein 已提交
7579
		netif_napi_del(&bnx2x_fp(bp, i, napi));
Y
Yitchak Gertner 已提交
7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

/* Re-read shared-memory (shmem) state after an EEH reset and
 * re-establish communication with the MCP management firmware. */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem must live inside the expected window; otherwise the MCP
	 * is considered absent and the driver runs without it */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

W
Wendy Xiong 已提交
7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* the link is gone for good - no point requesting a reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

7694 7695 7696 7697 7698
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

W
Wendy Xiong 已提交
7699 7700
	rtnl_lock();

Y
Yitchak Gertner 已提交
7701 7702
	bnx2x_eeh_recover(bp);

W
Wendy Xiong 已提交
7703
	if (netif_running(dev))
Y
Yitchak Gertner 已提交
7704
		bnx2x_nic_load(bp, LOAD_NORMAL);
W
Wendy Xiong 已提交
7705 7706 7707 7708 7709 7710 7711 7712

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
E
Eilon Greenstein 已提交
7713 7714
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
W
Wendy Xiong 已提交
7715 7716
};

E
Eliezer Tamir 已提交
7717
static struct pci_driver bnx2x_pci_driver = {
W
Wendy Xiong 已提交
7718 7719 7720 7721 7722 7723 7724
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
E
Eliezer Tamir 已提交
7725 7726 7727 7728
};

/* Module entry point: create the slow-path workqueue, then register the
 * PCI driver.  The workqueue must exist before any probe can run. */
static int __init bnx2x_init(void)
{
	int rc;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (!bnx2x_wq) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	rc = pci_register_driver(&bnx2x_pci_driver);
	if (rc) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return rc;
}

/* Module exit: unregister the PCI driver first so no more work can be
 * queued, then destroy the slow-path workqueue. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	/* drain queued CNIC kwqes onto the SPQ while there is room */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer with wrap-around */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

/* Queue up to @count CNIC kwqes on the internal ring; returns how many
 * were accepted (entries past the ring capacity are left for the caller
 * to resubmit). */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer with wrap-around */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* kick the SPQ if it can take more kwqes right away */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

/* Forward a control command to the registered CNIC driver under
 * cnic_mutex (process context).  Returns 0 when no CNIC driver is
 * registered. */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

/* Same as bnx2x_cnic_ctl_send() but safe from bottom-half context: the
 * ops pointer is sampled under RCU instead of the mutex. */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
D
Dmitry Kravkov 已提交
7869
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

/* Dispatch a control request issued by the CNIC driver. */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

D
Dmitry Kravkov 已提交
7937
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

/* CNIC callback: register @ops/@data with this device and set up the
 * kwqe ring and the CNIC status block. */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* empty ring: producer == consumer */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish ops last, after all state above is in place */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

/* CNIC callback: undo bnx2x_register_cnic().  synchronize_rcu() makes
 * sure no bottom-half reader still holds the old ops pointer before the
 * kwq ring is freed. */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

/* Fill in and hand out the device description the CNIC driver needs to
 * attach to this NIC. */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */
8040