/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

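/* Allocate zeroed, DMA-coherent memory for a queue and record its length and
 * entry size.
 */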
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

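/* Enable or disable host interrupts by toggling the host-interrupt bit in the
 * PCI config-space membar control register.
 */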
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

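/* Ring the RX queue doorbell to make 'posted' receive buffers available to
 * the hardware.
 */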
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

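/* Ring the TX doorbell to hand 'posted' new WRBs to the hardware */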
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

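/* Ring the event-queue doorbell; optionally re-arm the EQ, clear the
 * interrupt and acknowledge 'num_popped' processed events.
 */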
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

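/* Change the interface MAC address: program the new MAC via the FW and verify
 * that it actually took effect before updating the netdev.
 */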
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

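/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit SW accumulator,
 * accounting for wrap-around since the last read.
 */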
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

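/* Fill rtnl_link_stats64 by aggregating the per-queue SW counters and the HW
 * error counters cached in drv_stats.
 */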
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

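/* Build the TX VLAN tag from the skb, substituting the FW-recommended
 * priority when the skb's priority is not in the available priority bitmap.
 */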
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

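/* Insert the VLAN tag (and the QnQ outer tag, if configured) into the packet
 * itself, so that HW VLAN tagging can be skipped where it misbehaves.
 */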
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

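/* Transmit entry point: apply HW workarounds, map and enqueue the skb's WRBs
 * and ring the TX doorbell, deferring the doorbell while xmit_more is set.
 */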
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	struct be_queue_info *txq = &txo->q;
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
S

1347 1348 1349 1350 1351 1352
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

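/* Reconcile the interface RX filters with the netdev state: promiscuous
 * mode, and the unicast and multicast address lists.
 */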
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

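/* Set a VF TX rate cap (max_tx_rate in Mbps); on Skyhawk the rate must be a
 * whole-percentage multiple of the link speed.
 */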
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

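/* Record the packet counts and timestamp used as the baseline for the next
 * adaptive interrupt-coalescing calculation.
 */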
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

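/* Adaptive interrupt coalescing: recompute each EQ's delay from the observed
 * packet rate and push any changed values to the FW.
 */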
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1651
static void be_rx_stats_update(struct be_rx_obj *rxo,
1652
			       struct be_rx_compl_info *rxcp)
1653
{
1654
	struct be_rx_stats *stats = rx_stats(rxo);
1655

1656
	u64_stats_update_begin(&stats->sync);
1657
	stats->rx_compl++;
1658
	stats->rx_bytes += rxcp->pkt_size;
1659
	stats->rx_pkts++;
1660
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1661
		stats->rx_mcast_pkts++;
1662
	if (rxcp->err)
1663
		stats->rx_compl_err++;
1664
	u64_stats_update_end(&stats->sync);
1665 1666
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

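/* Returns the page_info of the RX fragment at the queue tail. A compound
 * page is DMA-unmapped only when its last fragment (last_frag) is
 * consumed; earlier fragments are merely synced for CPU access, so the
 * page mapping stays live until the final fragment arrives.
 */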
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

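/* Frames up to BE_HDR_LEN are copied entirely into the skb head and the
 * page reference is dropped; larger frames keep only the Ethernet header
 * in the head and attach the remaining data as page fragments.
 */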
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}

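/* Completion-ring ownership protocol: an entry is valid only while its
 * 'valid' dword is non-zero. The entry is parsed into rxo->rxcp and then
 * zeroed, so the same CQ slot is recognized as new only after HW rewrites
 * it.
 */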
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

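/* A sketch of the splitting, assuming the default rx_frag_size of 2048
 * and 4K pages: big_page_size works out to 4096, so each page yields two
 * fragments; the page is dma_map_page()'d once, the second fragment
 * reuses that mapping at offset 2048, and only the fragment marked
 * last_frag triggers the eventual dma_unmap_page().
 */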
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

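/* Walks the TX WRB ring from the current tail up to last_index, unmapping
 * the header and fragment WRBs of each completed request and freeing the
 * associated skbs; returns the number of WRBs reclaimed.
 */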
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);  /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	dev_consume_skb_any(skb);

	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}

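/* Drains TX completions on ifdown: polls every TX CQ until the hardware
 * has been silent for ~10ms, then force-frees any WRBs that were queued
 * but never notified to the HW and rewinds the ring indices.
 */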
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				timeo = 0;
			}
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}

static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
{
	switch (status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	case BE_TX_COMP_ACL_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	}
}

static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
{
	switch (status) {
	case LANCER_TX_COMP_LSO_ERR:
		tx_stats(txo)->tx_tso_err++;
		break;
	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
		tx_stats(txo)->tx_spoof_check_err++;
		break;
	case LANCER_TX_COMP_QINQ_ERR:
		tx_stats(txo)->tx_qinq_err++;
		break;
	case LANCER_TX_COMP_PARITY_ERR:
		tx_stats(txo)->tx_internal_parity_err++;
		break;
	case LANCER_TX_COMP_DMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	}
}

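/* Reaps TX completions for one queue: reclaims WRBs, records per-queue
 * error stats from the completion status, and re-wakes the netdev
 * subqueue once at least half the TX ring is free again.
 */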
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

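/* NAPI vs busy-poll mutual exclusion: eqo->lock and eqo->state arbitrate
 * whether an EQ is currently owned by the NAPI handler or by a
 * busy-polling socket; the losing side records a *_YIELD flag and backs
 * off instead of processing the queues concurrently.
 */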
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif

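/* Lancer chips report errors through the SLIPORT status registers, while
 * BE2/BE3/Skyhawk expose Unrecoverable Error (UE) bits in PCI config
 * space; masked UE bits are ignored and the decoded bit names come from
 * ue_status_low_desc[]/ue_status_hi_desc[].
 */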
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

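/* MSI-x vector budgeting (as implemented below): when RoCE is supported
 * the granted vectors are split evenly between RoCE and the NIC, e.g. 16
 * enabled vectors leave 8 for RoCE and 8 for NIC event queues.
 */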
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

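/* ifdown path: quiesce NAPI and busy-poll, disable MCC events, drain TX
 * completions, tear down the RX queues and finally synchronize and
 * release the IRQs, leaving all EQs cleaned and disarmed.
 */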
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

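/* A sketch of the RSS indirection fill below, assuming four RSS rings and
 * the 128-entry table used by be_cmd_rss_config(): rsstable[] is filled
 * round-robin as rss_id[0], rss_id[1], rss_id[2], rss_id[3], repeating,
 * so flows hash evenly across the RSS queues.
 */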
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}

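/* ifup path: create and fill the RX queues, register IRQs, arm all
 * CQs/EQs, enable NAPI and busy-poll, then query link state before
 * starting the TX queues.
 */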
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

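/* The seed MAC below keeps the PF's 3-byte OUI and derives the low three
 * bytes from a jhash of the PF MAC; be_vf_eth_addr_config() then hands
 * VF n the seed with the last octet incremented n times.
 */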
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	if (adapter->pmac_id) {
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[0], 0);
		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary MAC along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

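/* Create an interface with the given capability flags; only the subset of
 * flags that the driver wants *and* the function supports ends up enabled.
 */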
static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
			u32 cap_flags, u32 vf)
{
	u32 en_flags;
	int status;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
		   BE_IF_FLAGS_RSS;

	en_flags &= cap_flags;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  if_handle, vf);

	return status;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 the FW does not report the supported resource limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are distributed equally across the max number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF gets access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by the FW in the Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

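/* Queue creation order matters: the EQs must exist before the TX/RX/MCC
 * queues that post completions to them; the stack's real_num_{tx,rx}_queues
 * values are synced last.
 */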
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

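/* Extract the major number from a "w.x.y.z" style FW version string;
 * returns 0 if the string cannot be parsed.
 */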
static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

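/* Walk past the file header looking for the two-part flash-directory cookie;
 * candidate section headers are checked at 32-byte strides.
 */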
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

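/* The last 4 bytes of an image region hold its CRC; compare the CRC of what
 * is already in flash with the image's so unchanged regions can be skipped.
 */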
static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}

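/* Stream an image to flash in 32KB chunks: intermediate chunks are sent with
 * the SAVE opcode and the final chunk with the FLASH opcode, which commits
 * the region.
 */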
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

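/* Older UFI files carry 0xFFFF in the optype field of a section entry; in
 * that case derive the flash optype from the image type instead.
 */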
static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type   = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
4352 4353
		}

4354 4355
		if (crc_match)
			continue;
4356

4357 4358
flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

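/* Lancer firmware is downloaded with the write_object command in 32KB
 * chunks to the "/prg" object; a final zero-length write commits the image.
 */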
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}

#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11

static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return -1;
	}
	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
								SH_UFI;
	case BLD_STR_UFI_TYPE_BE3:
		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
								BE3_UFI;
	case BLD_STR_UFI_TYPE_BE2:
		return BE2_UFI;
	default:
		return -1;
	}
}

/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	int ufi_type = be_get_ufi_type(adapter, fhdr);

	switch (ufi_type) {
	case SH_P2_UFI:
		return skyhawk_chip(adapter);
	case SH_UFI:
		return (skyhawk_chip(adapter) &&
			adapter->asic_rev < ASIC_REV_P2);
	case BE3R_UFI:
		return BE3_chip(adapter);
	case BE3_UFI:
		return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
	case BE2_UFI:
		return BE2_chip(adapter);
	default:
		return false;
	}
}

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;
		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

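/* On Skyhawk the doorbell BAR is shared with the RoCE driver; record its
 * location and size so the be_roce side can use it.
 */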
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

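/* Lancer error recovery: wait for the chip to become ready again, then tear
 * down and rebuild the entire adapter state.
 */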
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

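/* Periodic (1 second) housekeeping: reap MCC completions, refresh stats,
 * poll die temperature, replenish starved RX queues, update EQ delays
 * and report incompatible SFP+ modules.
 */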
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

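/* Human-readable name of the multi-channel (mc_type) mode the function
 * is provisioned in; used only in the probe-time banner message.
 */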
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

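/* Map the PCI device ID to the NIC name printed in the probe banner. */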
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

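/* be_probe() - PCI probe callback: enable the device and BARs, set the
 * DMA mask, sync with FW readiness (FLR-ing the function first unless
 * VFs are already enabled), init stats and queues via be_setup(), then
 * register the netdev and start the periodic recovery task.
 */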
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

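/* Legacy PM suspend: optionally arm wake-on-LAN, quiesce the interface
 * and workers, free the queues and put the device into the requested
 * low-power state.
 */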
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

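/* Legacy PM resume: the mirror of be_suspend() - re-enable the device,
 * reset and re-init the FW interface, rebuild the queues and restart
 * the recovery worker before re-attaching the netdev.
 */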
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

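/* EEH/AER error_detected callback: on the first error quiesce the
 * function (detach and close the netdev, be_clear()), then tell the PCI
 * core whether to disconnect the device or attempt a slot reset.
 */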
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

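/* EEH/AER slot_reset callback: re-enable the freshly reset device,
 * restore PCI state and wait for FW readiness before reporting the
 * slot as recovered.
 */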
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

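/* EEH/AER resume callback: bring the function fully back up after a
 * successful slot reset - reset the function, re-init the FW interface,
 * be_setup() and re-attach the netdev.
 */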
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);