/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>

#include "be_hw.h"

#define DRV_VER			"2.101.346u"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
#define OC_NAME1		"Emulex OneConnect 10Gbps NIC (be3)"
#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"

#define BE_VENDOR_ID 		0x19a2
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700
#define OC_DEVICE_ID2		0x710

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME1;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN 		64
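/* 9018 bytes = 9000-byte jumbo payload + 14-byte Ethernet header + 4-byte FCS */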
#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96
#define BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST 		BE_NAPI_WEIGHT /* Frags posted at a time */
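/* Replenish the rx queue (up to MAX_RX_POST frags at a time) once the
 * count of posted rx buffers drops below this watermark */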
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define FW_VER_LEN		32

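/* A DMA-able buffer: kernel virtual address, bus address and length */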
struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

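/* Queue lengths are powers of 2, so index wrap-around can use a mask
 * instead of a divide: MODULO(val, limit) equals val % limit when limit
 * is a power of 2, which the BUG_ON below enforces.
 */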
static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
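
/* By convention head is the producer index (advanced when posting an
 * entry) and tail the consumer index (advanced when processing a
 * completion), with q->used tracking occupancy. A typical post, as a
 * sketch:
 *
 *	wrb = queue_head_node(txq);
 *	... fill *wrb ...
 *	queue_head_inc(txq);
 *	atomic_inc(&txq->used);
 */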

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */

	struct napi_struct napi;
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_drvr_stats {
	u32 be_tx_reqs;		/* number of TX requests initiated */
	u32 be_tx_stops;	/* number of times TX Q was stopped */
	u32 be_fwd_reqs;	/* number of send reqs through forwarding i/f */
	u32 be_tx_wrbs;		/* number of tx WRBs used */
	u32 be_tx_events;	/* number of tx completion events  */
	u32 be_tx_compl;	/* number of tx completion entries processed */
	ulong be_tx_jiffies;
	u64 be_tx_bytes;
	u64 be_tx_bytes_prev;
	u32 be_tx_rate;

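	/* Pad so the tx stats (above) and rx stats (below) sit on
	 * separate cache lines, avoiding false sharing */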
	u32 cache_barrier[16];

	u32 be_ethrx_post_fail; /* number of ethrx buffer alloc failures */
	u32 be_rx_polls;	/* number of times NAPI called poll function */
	u32 be_rx_events;	/* number of ucast rx completion events  */
	u32 be_rx_compl;	/* number of rx completion entries processed */
	ulong be_rx_jiffies;
	u64 be_rx_bytes;
	u64 be_rx_bytes_prev;
	u32 be_rx_rate;
	/* number of non ether type II frames dropped where
	 * frame len > length field of Mac Hdr */
	u32 be_802_3_dropped_frames;
	/* number of non ether type II frames malformed where
	 * frame len < length field of Mac Hdr */
	u32 be_802_3_malformed_frames;
	u32 be_rxcp_err;	/* Num rx completion entries w/ err set. */
	ulong rx_fps_jiffies;	/* jiffies at last FPS calc */
	u32 be_rx_frags;
	u32 be_prev_rx_frags;
	u32 be_rx_fps;		/* Rx frags per second */
};

struct be_stats_obj {
	struct be_drvr_stats drvr_stats;
	struct be_dma_mem cmd;
};

struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	dma_addr_t bus;
	u16 page_offset;
	bool last_page_user;
};

struct be_rx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
};

#define BE_NUM_MSIX_VECTORS		2	/* 1 each for Tx and Rx */
struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */

	spinlock_t mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The address actually
	 * allocated is kept here so it can be freed later */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
	bool msix_enabled;
	bool isr_registered;

	/* TX Rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj;

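	/* Padding to keep tx and rx ring state on separate cache lines */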
	u32 cache_line_break[8];

	/* Rx rings */
	struct be_eq_obj rx_eq;
	struct be_rx_obj rx_obj;
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */

	struct vlan_group *vlan_grp;
	u16 vlans_added;
	u16 max_vlans;	/* Number of vlans supported */
	u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
	struct be_dma_mem mc_cmd_mem;

	struct be_stats_obj stats;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;

	/* Ethtool knobs and info */
	bool rx_csum; 		/* BE card must perform rx-checksumming */
	char fw_ver[FW_VER_LEN];
	u32 if_handle;		/* Used to configure filtering */
	u32 pmac_id;		/* MAC addr handle used by BE card */

	bool eeh_err;
	bool link_up;
	u32 port_num;
	bool promiscuous;
	bool wol;
	u32 cap;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	int link_speed;
	u8 port_type;
	u8 transceiver;
	u8 generation;		/* BladeEngine ASIC generation */
};

/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3

extern const struct ethtool_ops be_ethtool_ops;

#define drvr_stats(adapter)		(&(adapter)->stats.drvr_stats)

static inline unsigned int be_pci_func(struct be_adapter *adapter)
{
	return PCI_FUNC(adapter->pdev->devfn);
}

#define BE_SET_NETDEV_OPS(netdev, ops)	((netdev)->netdev_ops = (ops))

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) 				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + 	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
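/* e.g. 100 bytes starting 16 bytes before a 4K boundary span 2 pages */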

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)						\
		 ((size_t)(addr) & (PAGE_SIZE_4K-1))

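/*
 * The AMAP_* helpers below access bit-fields in little-endian hardware
 * descriptors. They depend on the convention used by the layout structs
 * in be_hw.h: every descriptor bit is declared as one byte, so
 * offsetof() and sizeof() on a field give its bit offset and bit width
 * directly.
 */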
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)  				\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
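
/* Illustrative use, with struct and field names as placeholders for the
 * real layouts defined in be_hw.h:
 *
 *	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
 *	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vlanf, compl);
 */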

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}
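
/* A 32-bit byte swap is its own inverse, which is why both direction
 * macros above map to swap_dws(); on little-endian hosts it compiles
 * away entirely. */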

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}
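
/* Reading the version nibble via ip_hdr() works for IPv6 too: the
 * version field occupies the same leading bits of the network header in
 * both protocols. */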

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
extern void netdev_stats_update(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif				/* BE_H */