/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
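
/* A usage sketch (module name assumed to be be2net): loading with
 * "modprobe be2net rx_frag_size=4096" posts 4KB receive fragments
 * instead of the 2KB default; S_IRUGO exposes the value read-only
 * in sysfs after load.
 */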

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
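/* Exporting the table below lets hotplug tooling match and autoload the
 * driver when one of these PCI IDs appears on the bus.
 */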
MODULE_DEVICE_TABLE(pci, be_dev_ids);

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

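/* Note on the doorbell writes below: the ring id, the arm/clear flags and
 * the count of entries consumed are packed into a single 32-bit value, so
 * notifying and re-arming a queue costs one MMIO write.
 */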
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/*  no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;
	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/*  packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

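/* A rough sketch of the adaptive coalescing heuristic below: the measured
 * rx frags/sec are scaled (fps / 110000, then << 3) into an EQ delay and
 * clamped to [min_eqd, max_eqd]; very low rates (eqd < 10) turn coalescing
 * off entirely so an idle link keeps its latency.
 */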
/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	return &dev->stats;
}

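/* Worked example for be_calc_rate() below: 250,000,000 bytes moved in
 * 2*HZ ticks -> 125,000,000 bytes/sec, << 3 -> 1,000,000,000 bits/sec,
 * / 1,000,000 -> 1000 Mbits/sec.
 */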
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

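/* Illustrative counts for the helper below: a linear skb with two page
 * frags needs 1 hdr + 1 linear + 2 frag = 4 WRBs (even, no dummy); with
 * one page frag it needs 3, so a dummy WRB is added to keep the number
 * of posted entries even.
 */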
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}


static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	hdr = queue_head_node(txq);
	atomic_add(wrb_cnt, &txq->used);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}
		be_txq_notify(adapter, txq->id, wrb_cnt);
		be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
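	/* e.g. assuming BE_MAX_JUMBO_FRAME_SIZE is 9018 bytes, the largest
	 * MTU accepted above works out to 9018 - (14 + 4) = 9000 bytes.
	 */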
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	if (adapter->vlans_added <= adapter->max_vlans)  {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}
	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_tag[vid] = 0;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	adapter->vlans_added--;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
	return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

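/* Decide whether the stack must verify checksums itself: the helper below
 * returns true unless rx checksum offload (cso) is on and the hardware
 * validated the L4 and IP checksums of a TCP/UDP frame.
 */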
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data? */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj =  &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data? */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

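/* __GFP_COMP below makes a multi-page allocation a compound page, so
 * get_page()/put_page() on any rx fragment pin and release the whole
 * block the fragments were carved from.
 */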
static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return  alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;
	wrb = queue_tail_node(txq);
	be_dws_le_to_cpu(wrb, sizeof(*wrb));
	busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
	if (busaddr != 0) {
		pci_unmap_single(adapter->pdev, busaddr,
				 wrb->frag_len, PCI_DMA_TODEVICE);
	}
	num_wrbs++;
	queue_tail_inc(txq);
	while (cur_index != last_index) {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_page(adapter->pdev, busaddr,
				       wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	}

	atomic_sub(num_wrbs, &txq->used);
	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);

		/* After the rxq is invalidated, wait for a grace time
		 * of 1ms for all dma to end and the flush compl to arrive
		 */
		mdelay(1);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

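/* Worked example for the helper below: PCI function 1 owns evt ids 8-15,
 * so eq_id 10 maps to bit number 10 - 8 * 1 = 2.
 */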
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);
	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

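/* Only error-free TCP frames are considered for GRO below; everything
 * else takes the regular rx completion path.
 */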
static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}
static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
	return;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto ret_sts;
	be_link_status_update(adapter, link_up);

	status = be_vid_config(adapter);
	if (status)
		goto ret_sts;

	status = be_cmd_set_flow_control(adapter,
					adapter->tx_fc, adapter->rx_fc);
	if (status)
		goto ret_sts;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
	return status;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan \n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

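/* Bring up the adapter: create the interface, then the tx, rx and mcc
 * queues. On any failure, unwind whatever was created, in reverse order.
 */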
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	int status;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

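/* Tear down everything created by be_setup(), in reverse order */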
static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
char flash_cookie[2][16] =	{"*** SE FLAS",
				"H DIRECTORY *** "};

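/* Decide whether the redboot section needs reflashing by comparing the
 * 4-byte CRC stored at the end of the flashed image with the same bytes
 * in the new firmware image; flashing is skipped when they match.
 */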
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(img_start + image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if the flashed crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;

	struct flash_comp gen3_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3}
	};
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
	}
	for (i = 0; i < 8; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			 pflashcomp[i].offset, pflashcomp[i].size,
			 filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
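		/* Write the component in 32KB chunks: intermediate chunks
		 * are staged with FLASHROM_OPER_SAVE; the final chunk is
		 * committed with FLASHROM_OPER_FLASH.
		 */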
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0;
	const u8 *p;
	char fw_ver[FW_VER_LEN];
	char fw_cfg;

	status = be_cmd_get_fw_ver(adapter, fw_ver);
	if (status)
		return status;

	fw_cfg = *(fw_ver + 2);
	if (fw_cfg == '0')
		fw_cfg = '1';
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

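	/* Flash only when the UFI file's generation matches the adapter's.
	 * A Gen3 UFI carries a table of images; only the image with id 1
	 * is flashed here.
	 */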
	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		for (i = 0; i < fhdr3->num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (img_hdr_ptr->imageid == 1) {
				status = be_flash_data(adapter, fw,
						&flash_cmd, fhdr3->num_imgs);
			}

		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

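	/* The controller's PCI config space is exposed through BAR 1 on
	 * BE2 and through BAR 0 otherwise.
	 */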
	if (adapter->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
			pci_resource_len(adapter->pdev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
				&adapter->port_num, &adapter->cap);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
	if (status)
		return status;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

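	/* Functions with capability bit 0x400 set support only a quarter
	 * of the usual number of VLAN filters.
	 */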
	if (adapter->cap & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

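	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unavailable */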
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	status = be_cmd_POST(adapter);
	if (status)
		goto ctrl_clean;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	status = be_setup(adapter);
	if (status)
		return status;
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
	return;
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);