/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}
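
/* Enable or disable interrupt generation for this function by toggling the
 * host-interrupt bit in the membar control register; no-op during EEH error
 * recovery or when the bit is already in the requested state.
 */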
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
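
/* Ring the RQ doorbell with the number of rx buffers just posted; the
 * wmb() orders the buffer writes ahead of the doorbell write.
 */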
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
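
/* ndo_set_mac_address: compare the new MAC against the one programmed in
 * hw, add it as a new pmac entry and only then delete the old entry, so
 * the interface is never left without a valid filter.
 */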
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
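
/* Copy the v0 (BE2) hw stats from the stats-cmd DMA buffer into the
 * driver's consolidated stats block.
 */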
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
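
/* The erx hw counter is only 16 bits wide; fold each sample into the
 * 32-bit accumulator at *acc, adding 65536 whenever the low half wraps.
 */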
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}
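
/* ndo_get_stats64: fold the per-queue sw counters (sampled under their
 * u64 sync points) and the hw error counters into the rtnl stats.
 */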
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
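
/* Replace the vlan priority in the tag with the adapter's recommended
 * priority when the OS-supplied priority is not in the available bitmap.
 */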
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
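
/* Software-insert the vlan tag into the packet body (hw csum workaround
 * in be_xmit); clears vlan_tci so the tag is not inserted a second time.
 */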
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}
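
/* ndo_start_xmit: apply the short-pkt padding and vlan-csum hw
 * workarounds, build the wrbs, then ring the tx doorbell.
 */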
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
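
/* ndo_set_rx_mode: program promisc/allmulti/uc-list/mc-list filters,
 * falling back to a more permissive mode whenever hw filter slots run out.
 */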
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
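
/* ndo_set_vf_tx_rate: rate arrives in Mbps (100-10000); the hw command
 * appears to take it in units of 10 Mbps, hence the rate / 10 below.
 */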
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}
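
/* Walk the PCI bus and count this adapter's VFs; returns either the
 * number of assigned (in-use) VFs or the total, depending on vf_state.
 */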
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && dev->physfn == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
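
/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the observed rx packet rate and reprogram the EQ if it changed.
 */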
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
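
/* Return the page_info for the given rx frag and unmap the backing page
 * from the device if this frag was the page's last user.
 */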
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
				 struct be_rx_compl_info *rxcp)
1379 1380 1381 1382 1383 1384
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1385
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
A
Ajit Khaparde 已提交
1398
	rxcp->rss_hash =
1399
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1400 1401
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1402 1403 1404
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
1405
	}
1406
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1407 1408
}

S
Sathya Perla 已提交
1409 1410
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
1411 1412 1413 1414 1415 1416
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1417
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
A
Ajit Khaparde 已提交
1430
	rxcp->rss_hash =
1431
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1432 1433
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1434 1435 1436
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
1437
	}
1438
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1439 1440 1441 1442 1443 1444 1445
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;
S
Sathya Perla 已提交
1446

1447 1448 1449 1450
	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;
S
Sathya Perla 已提交
1451

1452 1453
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));
S
Sathya Perla 已提交
1454

1455
	if (adapter->be3_native)
S
Sathya Perla 已提交
1456
		be_parse_rx_compl_v1(compl, rxcp);
1457
	else
S
Sathya Perla 已提交
1458
		be_parse_rx_compl_v0(compl, rxcp);
S
Sathya Perla 已提交
1459

1460 1461 1462
	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
1463
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1464
			rxcp->vlanf = 0;
S
Sathya Perla 已提交
1465

1466
		if (!lancer_chip(adapter))
1467
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
S
Sathya Perla 已提交
1468

1469
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1470
		    !adapter->vlan_tag[rxcp->vlan_tag])
1471 1472
			rxcp->vlanf = 0;
	}
1473 1474 1475

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
S
Sathya Perla 已提交
1476

1477
	queue_tail_inc(&rxo->cq);
S
Sathya Perla 已提交
1478 1479 1480
	return rxcp;
}

1481
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
S
Sathya Perla 已提交
1482 1483
{
	u32 order = get_order(size);
1484

S
Sathya Perla 已提交
1485
	if (order > 0)
1486 1487
		gfp |= __GFP_COMP;
	return  alloc_pages(gfp, order);
S
Sathya Perla 已提交
1488 1489 1490 1491 1492 1493
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
1494
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
S
Sathya Perla 已提交
1495
{
1496
	struct be_adapter *adapter = rxo->adapter;
1497
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1498
	struct be_queue_info *rxq = &rxo->q;
S
Sathya Perla 已提交
1499 1500 1501 1502 1503
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

1504
	page_info = &rxo->page_info_tbl[rxq->head];
S
Sathya Perla 已提交
1505 1506
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
1507
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
S
Sathya Perla 已提交
1508
			if (unlikely(!pagep)) {
1509
				rx_stats(rxo)->rx_post_fail++;
S
Sathya Perla 已提交
1510 1511
				break;
			}
I
Ivan Vecera 已提交
1512 1513 1514
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
S
Sathya Perla 已提交
1515 1516 1517 1518 1519 1520 1521
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
1522
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
S
Sathya Perla 已提交
1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
1535 1536 1537

		prev_page_info = page_info;
		queue_head_inc(rxq);
S
Sathya Perla 已提交
1538
		page_info = &rxo->page_info_tbl[rxq->head];
S
Sathya Perla 已提交
1539 1540
	}
	if (pagep)
1541
		prev_page_info->last_page_user = true;
S
Sathya Perla 已提交
1542 1543 1544

	if (posted) {
		atomic_add(posted, &rxq->used);
1545
		be_rxq_notify(adapter, rxq->id, posted);
1546 1547
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
1548
		rxo->rx_post_starved = true;
S
Sathya Perla 已提交
1549 1550 1551
	}
}

1552
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
S
Sathya Perla 已提交
1553 1554 1555 1556 1557 1558
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

1559
	rmb();
S
Sathya Perla 已提交
1560 1561 1562 1563 1564 1565 1566 1567
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

1568 1569
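
/* Unmap and free the skb whose wrbs end at last_index; returns the number
 * of wrbs (including the header wrb) that were reclaimed.
 */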
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
S
Sathya Perla 已提交
1570
{
1571
	struct be_queue_info *txq = &txo->q;
1572
	struct be_eth_wrb *wrb;
1573
	struct sk_buff **sent_skbs = txo->sent_skb_list;
S
Sathya Perla 已提交
1574
	struct sk_buff *sent_skb;
1575 1576
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;
S
Sathya Perla 已提交
1577

1578
	sent_skb = sent_skbs[txq->tail];
S
Sathya Perla 已提交
1579
	BUG_ON(!sent_skb);
1580 1581 1582
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
1583
	queue_tail_inc(txq);
S
Sathya Perla 已提交
1584

1585
	do {
S
Sathya Perla 已提交
1586
		cur_index = txq->tail;
1587
		wrb = queue_tail_node(txq);
I
Ivan Vecera 已提交
1588 1589
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
1590 1591
		unmap_skb_hdr = false;

S
Sathya Perla 已提交
1592 1593
		num_wrbs++;
		queue_tail_inc(txq);
1594
	} while (cur_index != last_index);
S
Sathya Perla 已提交
1595 1596

	kfree_skb(sent_skb);
1597
	return num_wrbs;
S
Sathya Perla 已提交
1598 1599
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

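/* Acks pending events on the EQ. Without events (a spurious interrupt)
 * the EQ is re-armed right away; with events, NAPI is scheduled and the
 * EQ stays disarmed until the poll is done.
 */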
static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

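/* Used on the shutdown path: drains TX completions for up to 200ms and
 * then forcibly reclaims any posted WRBs whose completions never
 * arrived, so every sent skb is freed before the queues are destroyed.
 */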
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

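/* Multiple TX queues are used only on BE3-native PFs; SR-IOV,
 * multi-channel, Lancer, VF and BE2 configurations get a single queue.
 */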
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_want(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

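/* NAPI receive processing for one RX ring: consumes up to 'budget'
 * completions, discarding flush, partial-DMA and stray-port frames, and
 * feeds the rest to GRO or the regular receive path. The ring is
 * replenished once it drops below the refill watermark.
 */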
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

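/* NAPI handler shared by all TX/RX queues hashed onto this EQ. If any
 * TX ring exhausts its tx_budget, the full NAPI budget is reported as
 * consumed so that polling continues; MCC completions are processed on
 * the EQ that owns the MCC queue.
 */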
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}

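/* Samples the unrecoverable-error registers (SLIPORT status on Lancer,
 * the PCI-config UE registers elsewhere) and logs which functional
 * blocks, named in the ue_status tables, flagged an error.
 */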
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	     !sriov_want(adapter) && be_physfn(adapter)) {
		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}

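/* Sizes the MSI-X request: one vector per wanted RSS ring (at least one
 * for the default RX queue), plus vectors reserved for RoCE when the
 * device supports it. If the full count is unavailable, retries with
 * whatever pci_enable_msix() reports as possible.
 */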
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

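/* Creates the RX rings, default ring first as the FW expects, then the
 * RSS rings, and spreads the RSS ring ids round-robin across the
 * 128-entry indirection table.
 */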
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;
}

static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac,
						       MAC_ADDRESS_TYPE_NETWORK,
						       false, if_handle,
						       *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, true,
					       0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos;
	u16 dev_num_vfs;

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
	return 0;
}

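/* Adapter bring-up: creates EQs, CQs, the MCC queue and the TX/RX
 * queues, creates the interface with the desired capability flags,
 * programs the primary MAC, and optionally enables SR-IOV VFs before
 * kicking off the periodic worker.
 */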
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	if (lancer_chip(adapter) && !be_physfn(adapter)) {
		en_flags = BE_IF_FLAGS_UNTAGGED |
			    BE_IF_FLAGS_BROADCAST |
			    BE_IF_FLAGS_MULTICAST;
		cap_flags = en_flags;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif

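/* Firmware flashing support. A UFI image contains a flash section
 * directory; its location is found by scanning for flash_cookie below.
 */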
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
				       pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}

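/* Streams the FW image to Lancer in 32KB chunks with write-object
 * commands, commits it with a zero-length write, and triggers a FW
 * reset when the new image needs one to become active.
 */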
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
	kfree(adapter->pmac_id);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

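	/* The f/w mailbox must be 16-byte aligned; 16 extra bytes are
	 * allocated and both the CPU and DMA addresses are rounded up
	 * with PTR_ALIGN below.
	 */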
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id) {
		/* don't leak the rx_filter and mailbox buffers on failure */
		dma_free_coherent(&adapter->pdev->dev, rx_filter->size,
				  rx_filter->va, rx_filter->dma);
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

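/* Allocate the DMA buffer for the stats command; the size depends on
 * the stats version the controller speaks (v0 on BE2, pport stats on
 * Lancer, v1 otherwise).
 */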
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

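/* Read the extended FAT configuration to recover the UART trace level
 * currently programmed into the f/w.
 */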
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

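/* One-time probe-path setup: query the f/w config, size the VLAN and
 * pmac tables, and determine WoL capability and the f/w log level.
 */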
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WOL capabilities,
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

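/* Derive the adapter generation from the PCI device id; for SLI
 * devices, also validate the SLI_INTF register and record the
 * interface type and SLI family.
 */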
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			!be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

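/* Lancer error recovery: wait for the SLIPORT to become ready again,
 * then tear down and re-create the adapter state, reopening the
 * interface if it was running.
 */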
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}

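/* Runs every second: polls for h/w errors and, on a Lancer error that
 * was not EEH-induced, detaches the netdev and attempts SLIPORT
 * recovery.
 */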
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

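/* Periodic (1 second) housekeeping: reaps MCC completions while
 * interrupts are disabled, refreshes stats and die temperature,
 * replenishes starved RX queues and adapts EQ interrupt delays.
 */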
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

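/* A function reset at probe would destroy the resources of any VFs
 * that are already enabled (e.g. when probed by a kdump kernel), so
 * the reset is skipped in that case.
 */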
static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

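/* Main probe path: enable the PCI function, map the BARs, sync up with
 * the f/w, reset the function if required, then create rings via
 * be_setup() and register the netdev.
 */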
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
		 port_name);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

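/* PCI error (EEH/AER) hooks: on error, detach the netdev and tear the
 * adapter down; on slot reset, re-enable the device and wait for f/w
 * readiness; on resume, re-create the adapter and re-attach the netdev.
 */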
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
4136 4137
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
S
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);