/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/*  Macros and constants  */

/* IOC timer tick interval -- presumably milliseconds; TODO confirm in bfa_ioc */
#define BNA_IOC_TIMER_FREQ		200

/* Log string size */
#define BNA_MESSAGE_SIZE		256

/* By convention RxQ ids come in (large, small) pairs: odd id => small RxQ */
#define bna_is_small_rxq(_id) ((_id) & 0x1)

/* True when the two MAC addresses (sizeof(mac_t) bytes each) compare equal */
#define BNA_MAC_IS_EQUAL(_mac1, _mac2)					\
	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))

/* True for powers of two -- note this also evaluates true for x == 0 */
#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

/*
 * Round x DOWN to the nearest power of two, in place (0 stays 0).
 * x must be a side-effect-free lvalue; it is read and written repeatedly.
 */
#define BNA_TO_POWER_OF_2(x)						\
do {									\
	int _shift = 0;							\
	while ((x) && (x) != 1) {					\
		(x) >>= 1;						\
		_shift++;						\
	}								\
	(x) <<= _shift;							\
} while (0)

/* Round x UP to the nearest power of two, in place (result is at least 1) */
#define BNA_TO_POWER_OF_2_HIGH(x)					\
do {									\
	int n = 1;							\
	while (n < (x))							\
		n <<= 1;						\
	(x) = n;							\
} while (0)

/*
 * input : _addr-> os dma addr in host endian format,
 * output : _bna_dma_addr-> pointer to hw dma addr
 *
 * The address is byte-swapped to big endian in tmp_addr, then its two
 * 32-bit halves are copied into the msb/lsb fields of the hw descriptor.
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)				\
do {									\
	u64 tmp_addr =						\
	cpu_to_be64((u64)(_addr));				\
	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input : _bna_dma_addr-> pointer to hw dma addr
 * output : _addr-> os dma addr in host endian format
 *
 * Inverse of BNA_SET_DMA_ADDR: the big-endian msb/lsb halves are swapped
 * back to host order and recombined into a 64-bit address.
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)			\
do {								\
	(_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32)		\
	| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));	\
} while (0)

/*
 * Cast from a pointer to a member back to the enclosing record -- local
 * equivalent of the kernel's container_of(), using the classic
 * ((type *)0)->field offsetof idiom.
 */
#define	containing_rec(addr, type, field)				\
	((type *)((unsigned char *)(addr) -				\
	(unsigned char *)(&((type *)0)->field)))

/* TxQ work items needed for _vectors fragments: ceil(_vectors / 4) */
#define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 6)

/*
 * Resolve TxQ entry _qe_idx through the queue page table _qpt_ptr:
 * yields the entry pointer in _qe_ptr and, in _qe_ptr_range, the number
 * of consecutive entries left on the same page.
 */
#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>  BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 3)

/*
 * Resolve RxQ entry _qe_idx through the queue page table _qpt_ptr:
 * yields the entry pointer in _qe_ptr and, in _qe_ptr_range, the number
 * of consecutive entries left on the same page.
 */
#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];		\
	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 4)

/* Same page-table lookup as above, for completion-queue entries */
#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)	\
{									\
	unsigned int page_index;	  /* index within a page */	\
	void *page_addr;						\
									\
	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);		\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				    BNA_CQ_PAGE_INDEX_MAX_SHIFT)];	\
	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
}

/* Pointer to the _qe_idx'th element of a queue based at _q_base */
#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)			\
	(&((_cast *)(_q_base))[(_qe_idx)])

/* Contiguous elements from _qe_idx to the end of the queue */
#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

/* Advance _qe_idx by _qe_num with wrap-around; _q_depth must be a power of 2 */
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

/* Distance from _old_idx to _updated_idx, modulo _q_depth (power of 2) */
#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

/* Free slots in the queue; one slot stays reserved to tell full from empty */
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
	 ((_q_depth) - 1))

/* Occupied slots: producer minus consumer, modulo depth */
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &	\
	 (_q_depth - 1))

#define BNA_Q_GET_CI(_q_ptr)		((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr)		((_q_ptr)->q.producer_index)

/* Advance the producer index by _num with wrap-around (q_depth power of 2) */
#define BNA_Q_PI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.producer_index =					\
		(((_q_ptr)->q.producer_index + (_num)) &		\
		((_q_ptr)->q.q_depth - 1))

/* Advance the consumer index by _num with wrap-around (q_depth power of 2) */
#define BNA_Q_CI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.consumer_index =					\
		(((_q_ptr)->q.consumer_index + (_num))			\
		& ((_q_ptr)->q.q_depth - 1))

/* Free slots of the embedded queue (see BNA_QE_FREE_CNT) */
#define BNA_Q_FREE_COUNT(_q_ptr)					\
	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

/* Occupied slots of the embedded queue (see BNA_QE_IN_USE_CNT) */
#define BNA_Q_IN_USE_COUNT(_q_ptr)					\
	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))

#define BNA_LARGE_PKT_SIZE		1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
do {									\
	if ((_len) > BNA_LARGE_PKT_SIZE) {				\
		(_pkt)->large_pkt_cnt++;				\
	} else {							\
		(_pkt)->small_pkt_cnt++;				\
	}								\
} while (0)

/*
 * One-shot callback dispatch helpers for the Rx filter (RxF):
 * if a callback is armed, latch (cbfn, cbarg) into locals, disarm the
 * stored pair, then invoke -- so the callback may safely re-arm.
 */
#define	call_rxf_stop_cbfn(rxf)						\
do {									\
	if ((rxf)->stop_cbfn) {						\
		void (*cbfn)(struct bna_rx *);			\
		struct bna_rx *cbarg;					\
		cbfn = (rxf)->stop_cbfn;				\
		cbarg = (rxf)->stop_cbarg;				\
		(rxf)->stop_cbfn = NULL;				\
		(rxf)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define	call_rxf_start_cbfn(rxf)					\
do {									\
	if ((rxf)->start_cbfn) {					\
		void (*cbfn)(struct bna_rx *);			\
		struct bna_rx *cbarg;					\
		cbfn = (rxf)->start_cbfn;				\
		cbarg = (rxf)->start_cbarg;				\
		(rxf)->start_cbfn = NULL;				\
		(rxf)->start_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define	call_rxf_cam_fltr_cbfn(rxf)					\
do {									\
	if ((rxf)->cam_fltr_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_rx *);	\
		struct bnad *cbarg;					\
		cbfn = (rxf)->cam_fltr_cbfn;				\
		cbarg = (rxf)->cam_fltr_cbarg;				\
		(rxf)->cam_fltr_cbfn = NULL;				\
		(rxf)->cam_fltr_cbarg = NULL;				\
		cbfn(cbarg, rxf->rx);					\
	}								\
} while (0)

#define	call_rxf_pause_cbfn(rxf)					\
do {									\
	if ((rxf)->oper_state_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_rx *);	\
		struct bnad *cbarg;					\
		cbfn = (rxf)->oper_state_cbfn;				\
		cbarg = (rxf)->oper_state_cbarg;			\
		(rxf)->oper_state_cbfn = NULL;				\
		(rxf)->oper_state_cbarg = NULL;				\
		cbfn(cbarg, rxf->rx);					\
	}								\
} while (0)

/* Pause and resume share the oper_state callback slot */
#define	call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)

/*
 * Rx-mode bookkeeping: "bitmask" records which BNA_RXMODE_* bits are
 * under configuration, "mode" records their requested on/off state.
 * Arguments are parenthesized for macro-expansion safety.
 */
#define is_xxx_enable(mode, bitmask, xxx)				\
	(((bitmask) & (xxx)) && ((mode) & (xxx)))

#define is_xxx_disable(mode, bitmask, xxx)				\
	(((bitmask) & (xxx)) && !((mode) & (xxx)))

#define xxx_enable(mode, bitmask, xxx)					\
do {									\
	(bitmask) |= (xxx);						\
	(mode) |= (xxx);						\
} while (0)

#define xxx_disable(mode, bitmask, xxx)					\
do {									\
	(bitmask) |= (xxx);						\
	(mode) &= ~(xxx);						\
} while (0)

#define xxx_inactive(mode, bitmask, xxx)				\
do {									\
	(bitmask) &= ~(xxx);						\
	(mode) &= ~(xxx);						\
} while (0)

/* Promisc / default / allmulti wrappers over the generic xxx_* helpers above */
#define is_promisc_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask)				\
	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)

/*
 * Fetch the RxQ pair of an Rx path: q0 gets the only/large/data queue,
 * q1 gets the small/header queue (NULL for BNA_RXP_SINGLE).
 * NOTE(review): q0/q1 are left unmodified for an unexpected rxp->type.
 */
#define	GET_RXQS(rxp, q0, q1)	do {					\
	switch ((rxp)->type) {						\
	case BNA_RXP_SINGLE:						\
		(q0) = rxp->rxq.single.only;				\
		(q1) = NULL;						\
		break;							\
	case BNA_RXP_SLR:						\
		(q0) = rxp->rxq.slr.large;				\
		(q1) = rxp->rxq.slr.small;				\
		break;							\
	case BNA_RXP_HDS:						\
		(q0) = rxp->rxq.hds.data;				\
		(q1) = rxp->rxq.hds.hdr;				\
		break;							\
	}								\
} while (0)

#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)

/* Look up the active Tx with resource id _rid; _tx is NULL if not found */
#define bna_tx_from_rid(_bna, _rid, _tx)				\
do {									\
	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;			\
	struct bna_tx *__tx;						\
	struct list_head *qe;						\
	_tx = NULL;							\
	list_for_each(qe, &__tx_mod->tx_active_q) {			\
		__tx = (struct bna_tx *)qe;				\
		if (__tx->rid == (_rid)) {				\
			(_tx) = __tx;					\
			break;						\
		}							\
	}								\
} while (0)

/* Look up the active Rx with resource id _rid; _rx is NULL if not found */
#define bna_rx_from_rid(_bna, _rid, _rx)				\
do {									\
	struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;			\
	struct bna_rx *__rx;						\
	struct list_head *qe;						\
	_rx = NULL;							\
	list_for_each(qe, &__rx_mod->rx_active_q) {			\
		__rx = (struct bna_rx *)qe;				\
		if (__rx->rid == (_rid)) {				\
			(_rx) = __rx;					\
			break;						\
		}							\
	}								\
} while (0)

/*  Inline functions  */

/* Return the entry in list q whose MAC address equals addr, or NULL. */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
	struct list_head *entry;

	list_for_each(entry, q) {
		struct bna_mac *mac = (struct bna_mac *)entry;

		if (BNA_MAC_IS_EQUAL(mac->addr, addr))
			return mac;
	}
	return NULL;
}

#define bna_attr(_bna) (&(_bna)->ioceth.attr)

372
/* Function prototypes */
373

374
/* BNA */
375

376 377 378
/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);

379 380
/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
381
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
382 383 384
void bna_init(struct bna *bna, struct bnad *bnad,
			struct bfa_pcidev *pcidev,
			struct bna_res_info *res_info);
385
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
386
void bna_uninit(struct bna *bna);
387 388 389
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);
390 391 392 393 394 395 396 397

/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
			  struct bna_mac *mac);
struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
			  struct bna_mac *mac);
398 399 400
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			  struct bna_mcam_handle *handle);
401

402
/* MBOX */
403 404

/* API for BNAD */
R
Rasesh Mody 已提交
405
void bna_mbox_handler(struct bna *bna, u32 intr_status);
406

407
/* ETHPORT */
408 409 410 411 412

/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);

413 414
/* TX MODULE AND TX */

415 416 417 418 419 420
/* FW response handelrs */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
			      struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);
421 422 423 424 425 426

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);

427
/* APIs for ENET */
428 429 430 431 432 433 434 435 436
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
		    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
			       struct bna_tx_config *tx_cfg,
437
			       const struct bna_tx_event_cbfn *tx_cbfn,
438 439 440 441
			       struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
442 443
		    void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
444 445
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

446
/* RX MODULE, RX, RXF */
447

448 449 450 451 452 453 454 455 456
/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
			       struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
			      struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			       struct bfi_msgq_mhdr *msghdr);

457 458 459 460 461
/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);

462
/* APIs for ENET */
463 464 465 466 467 468 469 470 471
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
		    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
			       struct bna_rx_config *rx_cfg,
472
			       const struct bna_rx_event_cbfn *rx_cbfn,
473 474 475 476
			       struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
477 478
		    void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
479
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
R
Rasesh Mody 已提交
480
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
481 482 483
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
484 485 486 487 488 489 490
		 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *));
491 492
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
493
		 void (*cbfn)(struct bnad *, struct bna_rx *));
494 495
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
496
		     void (*cbfn)(struct bnad *, struct bna_rx *));
497 498 499
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
		enum bna_rxmode bitmask,
500
		void (*cbfn)(struct bnad *, struct bna_rx *));
501 502 503
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
504
/* ENET */
505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523

/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);

/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);

/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		      void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
			   struct bna_pause_config *pause_config,
			   void (*cbfn)(struct bnad *));
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		      void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);

524
/* IOCETH */
525 526 527 528 529

/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
			enum bna_cleanup_type type);
530

531
/* BNAD */
532

533 534 535 536 537 538 539 540 541 542 543
/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
			      enum bna_link_status status);

/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);

544 545 546 547 548
/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats);

#endif  /* __BNA_H__ */