/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"

#define DRV_VER			"4.0.100u"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"

#define BE_VENDOR_ID 		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4		0xe228	/* Device id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */

static inline char *nic_name(struct pci_dev *pdev)
{
58 59
	switch (pdev->device) {
	case OC_DEVICE_ID1:
60
		return OC_NAME;
61
	case OC_DEVICE_ID2:
62 63
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
64
	case OC_DEVICE_ID4:
65
		return OC_NAME_LANCER;
66 67
	case BE_DEVICE_ID2:
		return BE3_NAME;
68 69
	case OC_DEVICE_ID5:
		return OC_NAME_SH;
70
	default:
71
		return BE_NAME;
72
	}
73 74
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96
#define	BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define MAX_RSS_QS		4	/* BE limit is 4 queues/port */
#define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
#define MAX_TX_QS		8
#define BE_MAX_MSIX_VECTORS	(MAX_RX_QS + 1)/* RX + TX */
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST 		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define FW_VER_LEN		32
/* A DMA buffer: kernel virtual address, bus (DMA) address and size. */
struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

/* Generic ring (EQ/CQ/TXQ/RXQ/MCCQ) backed by DMA memory.
 * len must be a power of 2 (MODULO() below relies on it for wrapping). */
struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;	/* Number of entries in the ring */
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146
/* Wrap @val modulo @limit; @limit must be a non-zero power of two. */
static inline u32 MODULO(u16 val, u16 limit)
{
	u16 mask = limit - 1;

	BUG_ON(limit & mask);	/* reject non-power-of-two limits */
	return val & mask;
}

/* Advance *@index by @val, wrapping at power-of-two @limit. */
static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	u16 next = *index + val;

	*index = MODULO(next, limit);
}

/* Advance *@index by one, wrapping at power-of-two @limit. */
static inline void index_inc(u16 *index, u16 limit)
{
	index_adv(index, 1, limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

147 148 149 150 151
/* Address of the ring entry at an arbitrary @index. */
static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	u8 *ring = q->dma_mem.va;

	return ring + index * q->entry_size;
}

152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
/* Bump the head index by one, wrapping at the ring length. */
static inline void queue_head_inc(struct be_queue_info *q)
{
	q->head = MODULO(q->head + 1, q->len);
}

/* Bump the tail index by one, wrapping at the ring length. */
static inline void queue_tail_inc(struct be_queue_info *q)
{
	q->tail = MODULO(q->tail + 1, q->len);
}

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */
171
	u8  eq_idx;
172 173 174 175 176 177 178

	struct napi_struct napi;
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
179
	bool rearm_cq;
180 181
};

182
struct be_tx_stats {
183 184 185 186 187 188 189
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
190 191
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
S
Sathya Perla 已提交
192 193 194 195 196 197 198
};

struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
199
	struct be_tx_stats stats;
S
Sathya Perla 已提交
200 201 202 203 204
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
205
	DEFINE_DMA_UNMAP_ADDR(bus);
S
Sathya Perla 已提交
206 207 208 209
	u16 page_offset;
	bool last_page_user;
};

210 211 212
struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
213 214 215 216 217 218 219 220
	u64 rx_pkts_prev;
	ulong rx_jiffies;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_polls;		/* NAPI calls */
	u32 rx_events;
	u32 rx_compl;
221
	u32 rx_mcast_pkts;
222 223
	u32 rx_compl_err;	/* completions with err set */
	u32 rx_pps;		/* pkts per second */
224
	struct u64_stats_sync sync;
225 226
};

227 228
struct be_rx_compl_info {
	u32 rss_hash;
229
	u16 vlan_tag;
230 231
	u16 pkt_size;
	u16 rxq_idx;
232
	u16 port;
233 234 235 236 237 238 239 240 241 242 243 244 245
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 vtm;
	u8 pkt_type;
};

S
Sathya Perla 已提交
246
struct be_rx_obj {
247
	struct be_adapter *adapter;
S
Sathya Perla 已提交
248 249
	struct be_queue_info q;
	struct be_queue_info cq;
250
	struct be_rx_compl_info rxcp;
S
Sathya Perla 已提交
251
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
252 253 254 255
	struct be_eq_obj rx_eq;
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
256
	u32 cache_line_barrier[16];
S
Sathya Perla 已提交
257 258
};

259
struct be_drv_stats {
260
	u32 be_on_die_temperature;
261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294
	u32 tx_events;
	u32 eth_red_drops;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 rx_drops_invalid_ring;
	u32 forwarded_packets;
	u32 rx_drops_mtu;
	u32 rx_crc_errors;
	u32 rx_alignment_symbol_errors;
	u32 rx_pause_frames;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_match_errors;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_dropped_runt;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_pauseframes;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
	u32 jabber_events;
295 296
};

297
struct be_vf_cfg {
298 299 300 301 302
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 vlan_tag;
	u32 tx_rate;
303 304
};

305 306
#define BE_FLAGS_LINK_STATUS_INIT		1

S
Sathya Perla 已提交
307 308 309 310
struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

311 312 313
	u8 __iomem *csr;
	u8 __iomem *db;		/* Door Bell */

314
	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
315 316 317 318 319 320 321 322
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;
S
Sathya Perla 已提交
323

324
	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
325
	u32 num_msix_vec;
S
Sathya Perla 已提交
326 327 328 329
	bool isr_registered;

	/* TX Rings */
	struct be_eq_obj tx_eq;
330 331
	struct be_tx_obj tx_obj[MAX_TX_QS];
	u8 num_tx_qs;
S
Sathya Perla 已提交
332 333 334 335

	u32 cache_line_break[8];

	/* Rx rings */
336
	struct be_rx_obj rx_obj[MAX_RX_QS];
337
	u32 num_rx_qs;
S
Sathya Perla 已提交
338 339
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

340
	u8 eq_next_idx;
341
	struct be_drv_stats drv_stats;
342

343 344
	u16 vlans_added;
	u16 max_vlans;	/* Number of vlans supported */
345
	u8 vlan_tag[VLAN_N_VID];
346 347
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
348
	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
S
Sathya Perla 已提交
349

350
	struct be_dma_mem stats_cmd;
S
Sathya Perla 已提交
351 352
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
353
	u16 work_counter;
S
Sathya Perla 已提交
354

355
	u32 flags;
S
Sathya Perla 已提交
356 357
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
358
	int if_handle;		/* Used to configure filtering */
S
Sathya Perla 已提交
359
	u32 pmac_id;		/* MAC addr handle used by BE card */
360
	u32 beacon_state;	/* for set_phys_id */
S
Sathya Perla 已提交
361

362
	bool eeh_err;
363 364
	bool ue_detected;
	bool fw_timeout;
S
Sathya Perla 已提交
365
	u32 port_num;
366
	bool promiscuous;
367
	bool wol;
A
Ajit Khaparde 已提交
368
	u32 function_mode;
369
	u32 function_caps;
370 371
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
A
Ajit Khaparde 已提交
372
	bool stats_cmd_sent;
373 374
	int link_speed;
	u8 port_type;
375
	u8 transceiver;
376
	u8 autoneg;
377
	u8 generation;		/* BladeEngine ASIC generation */
378 379
	u32 flash_status;
	struct completion flash_compl;
380

381
	u32 num_vfs;
382
	u8 is_virtfn;
383 384
	struct be_vf_cfg *vf_cfg;
	bool be3_native;
385
	u32 sli_family;
386
	u8 hba_port_num;
387
	u16 pvid;
S
Sathya Perla 已提交
388 389
};

390
#define be_physfn(adapter) (!adapter->is_virtfn)
#define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)

/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

#define ON				1
#define OFF				0
#define lancer_chip(adapter)	((adapter->pdev->device == OC_DEVICE_ID3) || \
				 (adapter->pdev->device == OC_DEVICE_ID4))
404

405
extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define tx_stats(txo)			(&txo->stats)
#define rx_stats(rxo)			(&rxo->stats)

#define BE_SET_NETDEV_OPS(netdev, ops)	(netdev->netdev_ops = ops)

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Just skip the first default non-rss queue */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

S
Sathya Perla 已提交
426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513
/* The controller works in 4K pages irrespective of the host PAGE_SIZE. */
#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) 				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + 	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)						\
		 ((size_t)(addr) & (PAGE_SIZE_4K-1))

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)  				\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	if (bitsize == 32)
		return 0xFFFFFFFF;

	return (1 << bitsize) - 1;
}

/* Write @value into the bitfield described by (@dw_offset, @mask, @offset)
 * within the dword array at @ptr; other bits of the dword are preserved. */
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *)ptr + dw_offset;

	*dw = (*dw & ~(mask << offset)) | ((value & mask) << offset);
}

/* Set 'field' of the bit-level amap '_struct' at 'ptr' to 'val'.
 * Field widths are expressed in bits via sizeof on the amap struct. */
#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

/* Read the bitfield described by (@dw_offset, @mask, @offset) from the
 * dword array at @ptr. */
static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *)ptr + dw_offset;

	return (*dw >> offset) & mask;
}

/* Read 'field' of the bit-level amap '_struct' at 'ptr'. */
#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
/* Byte-swap each 32-bit dword of @wrb in place on big-endian hosts;
 * compiles to a no-op on little-endian builds.  @len is in bytes.
 * NOTE(review): the do-while assumes len > 0 and a multiple of 4 —
 * len == 0 would underflow and loop; callers appear to guarantee this. */
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}

/* True if @skb is an IPv4 or IPv6 TCP packet (non-IP frames return 0). */
static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	if (ip_hdr(skb)->version == 4)
		return ip_hdr(skb)->protocol == IPPROTO_TCP;

	if (ip_hdr(skb)->version == 6)
		return ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP;

	return 0;
}

/* True if @skb is an IPv4 or IPv6 UDP packet (non-IP frames return 0). */
static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	if (ip_hdr(skb)->version == 4)
		return ip_hdr(skb)->protocol == IPPROTO_UDP;

	if (ip_hdr(skb)->version == 6)
		return ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP;

	return 0;
}

514 515
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
516 517
	u32 sli_intf;

518 519
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
520 521
}

522 523 524 525 526 527 528 529 530
/* Generate a MAC address for a VF into @mac: keep the PF's OUI (first
 * 3 bytes) and derive the NIC-specific part from a jhash of the PF MAC. */
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

A
Ajit Khaparde 已提交
535 536 537 538 539
static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

540 541 542 543 544
static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
}

545
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
546
		u16 num_popped);
547
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
548
extern void be_parse_stats(struct be_adapter *adapter);
549
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
S
Sathya Perla 已提交
550
#endif				/* BE_H */