/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>

#include "be_hw.h"

#define DRV_VER			"2.103.175u"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"

#define BE_VENDOR_ID 		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device ID for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device ID for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device ID for Lancer cards */

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN 		64
#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96
#define BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define MAX_RSS_QS		4	/* BE limit is 4 queues/port */
#define BE_MAX_MSIX_VECTORS	(MAX_RSS_QS + 1 + 1)	/* RSS qs + 1 def Rx + Tx */
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST 		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define FW_VER_LEN		32

#define BE_MAX_VF		32

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));	/* limit must be a power of 2 */
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
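
/*
 * Illustrative sketch (not part of the driver) of how the ring helpers
 * above fit together; the "fill"/"process" steps are placeholders. The
 * ring length must be a power of 2, so the index math reduces to a
 * mask, e.g. MODULO(1025, 1024) == 1.
 *
 *	void *entry = queue_head_node(q);	(slot at producer index)
 *	... fill entry ...
 *	queue_head_inc(q);			(head wraps at q->len)
 *	atomic_inc(&q->used);
 *
 *	entry = queue_tail_node(q);		(slot at consumer index)
 *	... process entry ...
 *	queue_tail_inc(q);
 *	atomic_dec(&q->used);
 */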

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */
	u8  msix_vec_idx;

	struct napi_struct napi;
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u32 be_tx_reqs;		/* number of TX requests initiated */
	u32 be_tx_stops;	/* number of times TX Q was stopped */
	u32 be_tx_wrbs;		/* number of tx WRBs used */
	u32 be_tx_events;	/* number of tx completion events  */
	u32 be_tx_compl;	/* number of tx completion entries processed */
	ulong be_tx_jiffies;
	u64 be_tx_bytes;
	u64 be_tx_bytes_prev;
	u64 be_tx_pkts;
	u32 be_tx_rate;
};

struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_page_user;
};

struct be_rx_stats {
	u32 rx_post_fail;	/* number of ethrx buffer alloc failures */
	u32 rx_polls;	/* number of times NAPI called poll function */
	u32 rx_events;	/* number of ucast rx completion events  */
	u32 rx_compl;	/* number of rx completion entries processed */
	ulong rx_jiffies;
	u64 rx_bytes;
	u64 rx_bytes_prev;
	u64 rx_pkts;
	u32 rx_rate;
	u32 rx_mcast_pkts;
	u32 rxcp_err;	/* Num rx completion entries w/ err set. */
	ulong rx_fps_jiffies;	/* jiffies at last FPS calc */
	u32 rx_frags;
	u32 prev_rx_frags;
	u32 rx_fps;		/* Rx frags per second */
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_eq_obj rx_eq;
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* set when no rx frags are posted to BE */
	u32 cache_line_barrier[16];
};

struct be_vf_cfg {
	unsigned char vf_mac_addr[ETH_ALEN];
	u32 vf_if_handle;
	u32 vf_pmac_id;
	u16 vf_vlan_tag;
	u32 vf_tx_rate;
};

#define BE_INVALID_PMAC_ID		0xffffffff
struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */

	spinlock_t mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align on a 16-byte boundary. The
	 * originally allocated address is kept so it can be freed later */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
	bool msix_enabled;
	bool isr_registered;

	/* TX Rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj;
	struct be_tx_stats tx_stats;

	u32 cache_line_break[8];

	/* Rx rings */
	struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */
	u32 num_rx_qs;
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	u8 msix_vec_next_idx;

	struct vlan_group *vlan_grp;
	u16 vlans_added;
	u16 max_vlans;	/* Number of vlans supported */
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem mc_cmd_mem;

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;

	/* Ethtool knobs and info */
	bool rx_csum; 		/* BE card must perform rx-checksumming */
	char fw_ver[FW_VER_LEN];
	u32 if_handle;		/* Used to configure filtering */
	u32 pmac_id;		/* MAC addr handle used by BE card */

	bool eeh_err;
	bool link_up;
	u32 port_num;
	bool promiscuous;
	bool wol;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool ue_detected;
	bool stats_ioctl_sent;
	int link_speed;
	u8 port_type;
	u8 transceiver;
	u8 autoneg;
	u8 generation;		/* BladeEngine ASIC generation */
	u32 flash_status;
	struct completion flash_compl;

	bool sriov_enabled;
	struct be_vf_cfg vf_cfg[BE_MAX_VF];
	u8 is_virtfn;
	u32 sli_family;
};

#define be_physfn(adapter)		(!(adapter)->is_virtfn)

/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

#define lancer_chip(adapter)		((adapter)->pdev->device == OC_DEVICE_ID3)

extern const struct ethtool_ops be_ethtool_ops;

#define tx_stats(adapter)		(&(adapter)->tx_stats)
#define rx_stats(rxo)			(&(rxo)->stats)

#define BE_SET_NETDEV_OPS(netdev, ops)	((netdev)->netdev_ops = (ops))

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* As above, but skip the first (default, non-RSS) queue */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)
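
/*
 * Illustrative sketch (hypothetical loop body, not defined here) of
 * walking the rx queues with the iterators above:
 *
 *	struct be_rx_obj *rxo;
 *	int i;
 *
 *	for_all_rx_queues(adapter, rxo, i)
 *		refill_queue(rxo);	(hypothetical per-queue work)
 */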

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) 				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + 	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)						\
		 ((size_t)(addr) & (PAGE_SIZE_4K-1))
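
/*
 * Worked example: a 0x20-byte buffer at page offset 0xff0 straddles a
 * 4K boundary, so PAGES_4K_SPANNED(addr, 0x20) evaluates to
 * ((0xff0 + 0x20 + 0xfff) >> 12) = 2 pages, while OFFSET_IN_PAGE(addr)
 * is 0xff0.
 */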

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)  				\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field, not yet shifted into location */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
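
/*
 * How the AMAP macros are meant to be read (convention assumed from the
 * amap_* layouts in be_hw.h): each u8 member of an amap struct stands
 * for one BIT of the hardware structure, so offsetof() yields a bit
 * offset and sizeof() a bit width. With a hypothetical layout
 *
 *	struct amap_example {
 *		u8 valid;		(bit 0)
 *		u8 frag_len[14];	(bits 1-14)
 *	};
 *
 * a 14-bit field is written and read back like this:
 *
 *	AMAP_SET_BITS(struct amap_example, frag_len, dword_ptr, 64);
 *	len = AMAP_GET_BITS(struct amap_example, frag_len, dword_ptr);
 */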

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}
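
/*
 * Illustrative use (wrb is a placeholder descriptor pointer):
 * descriptors are built in CPU byte order and converted in place just
 * before being handed to the little-endian hardware:
 *
 *	be_dws_cpu_to_le(wrb, sizeof(*wrb));
 *
 * On little-endian builds swap_dws() compiles to an empty function, so
 * the conversion costs nothing there.
 */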

/* The version nibble sits at the same offset in IPv4 and IPv6 headers,
 * so ip_hdr() can safely be used for the version check on either */
static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}

/* Detect whether this function is a PCI virtual function (VF) */
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
	u8 data;
	u32 sli_intf;

	if (lancer_chip(adapter)) {
		/* Lancer reports the function type in the SLI interface reg */
		pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
								&sli_intf);
		adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	} else {
		/* Write a test pattern into PCI config space; the write
		 * takes effect only on a physical function */
		pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
		pci_read_config_byte(adapter->pdev, 0xFE, &data);
		adapter->is_virtfn = (data != 0xAA);
	}
}

/* Derive a deterministic MAC for a VF by hashing the PF's MAC address */
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	mac[2] = 0xC9;		/* 00-00-C9 is the Emulex OUI */
	mac[1] = 0x00;
	mac[0] = 0x00;
}
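
/*
 * Sketch of the intended use (the caller and the command step are
 * assumptions, not defined in this file): the PF derives a stable MAC
 * for each VF and then programs it via the mailbox:
 *
 *	u8 mac[ETH_ALEN];
 *	be_vf_eth_addr_generate(adapter, mac);
 *	(program mac on the VF's interface via an MCC MAC-add command)
 */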

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
extern void netdev_stats_update(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif				/* BE_H */