/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NIC_H
#define	NIC_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"

/* PCI device IDs */
#define	PCI_DEVICE_ID_THUNDER_NIC_PF		0xA01E
#define	PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF	0x0011
#define	PCI_DEVICE_ID_THUNDER_NIC_VF		0xA034
#define	PCI_DEVICE_ID_THUNDER_BGX		0xA026

/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_NIC_PF		0xA11E
#define PCI_SUBSYS_DEVID_81XX_NIC_PF		0xA21E
#define PCI_SUBSYS_DEVID_83XX_NIC_PF		0xA31E

#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF	0xA11E
#define PCI_SUBSYS_DEVID_88XX_NIC_VF		0xA134
#define PCI_SUBSYS_DEVID_81XX_NIC_VF		0xA234
#define PCI_SUBSYS_DEVID_83XX_NIC_VF		0xA334

/* PCI BAR nos */
#define	PCI_CFG_REG_BAR_NUM		0
#define	PCI_MSIX_REG_BAR_NUM		4

/* NIC SRIOV VF count */
#define	MAX_NUM_VFS_SUPPORTED		128
#define	DEFAULT_NUM_VF_ENABLED		8

#define	NIC_TNS_BYPASS_MODE		0
#define	NIC_TNS_MODE			1

/* NIC priv flags */
#define	NIC_SRIOV_ENABLED		BIT(0)

/* Min/Max packet size */
#define	NIC_HW_MIN_FRS			64
#define	NIC_HW_MAX_FRS			9190 /* Excluding L2 header and FCS */

/* Max pkinds */
#define	NIC_MAX_PKIND			16

/* Max when CPI_ALG is IP diffserv */
#define	NIC_MAX_CPI_PER_LMAC		64

/* NIC VF Interrupts */
#define	NICVF_INTR_CQ			0
#define	NICVF_INTR_SQ			1
#define	NICVF_INTR_RBDR			2
#define	NICVF_INTR_PKT_DROP		3
#define	NICVF_INTR_TCP_TIMER		4
#define	NICVF_INTR_MBOX			5
#define	NICVF_INTR_QS_ERR		6

#define	NICVF_INTR_CQ_SHIFT		0
#define	NICVF_INTR_SQ_SHIFT		8
#define	NICVF_INTR_RBDR_SHIFT		16
#define	NICVF_INTR_PKT_DROP_SHIFT	20
#define	NICVF_INTR_TCP_TIMER_SHIFT	21
#define	NICVF_INTR_MBOX_SHIFT		22
#define	NICVF_INTR_QS_ERR_SHIFT		23

#define	NICVF_INTR_CQ_MASK		(0xFF << NICVF_INTR_CQ_SHIFT)
#define	NICVF_INTR_SQ_MASK		(0xFF << NICVF_INTR_SQ_SHIFT)
#define	NICVF_INTR_RBDR_MASK		(0x03 << NICVF_INTR_RBDR_SHIFT)
#define	NICVF_INTR_PKT_DROP_MASK	BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define	NICVF_INTR_TCP_TIMER_MASK	BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define	NICVF_INTR_MBOX_MASK		BIT(NICVF_INTR_MBOX_SHIFT)
#define	NICVF_INTR_QS_ERR_MASK		BIT(NICVF_INTR_QS_ERR_SHIFT)
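
/* Minimal sketch (illustrative only, not an existing driver helper): how a
 * per-queue interrupt bit can be composed from the shift/mask definitions
 * above, e.g. BIT(NICVF_INTR_CQ_SHIFT + q_idx) for completion queue q_idx.
 */
static inline u64 nicvf_example_intr_bit(int intr_type, int q_idx)
{
	switch (intr_type) {
	case NICVF_INTR_CQ:
		return BIT(NICVF_INTR_CQ_SHIFT + q_idx);
	case NICVF_INTR_SQ:
		return BIT(NICVF_INTR_SQ_SHIFT + q_idx);
	case NICVF_INTR_RBDR:
		return BIT(NICVF_INTR_RBDR_SHIFT + q_idx);
	default:
		/* Remaining interrupt types use the single-bit masks above */
		return 0;
	}
}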

/* MSI-X interrupts */
#define	NIC_PF_MSIX_VECTORS		10
#define	NIC_VF_MSIX_VECTORS		20

#define NIC_PF_INTR_ID_ECC0_SBE		0
#define NIC_PF_INTR_ID_ECC0_DBE		1
#define NIC_PF_INTR_ID_ECC1_SBE		2
#define NIC_PF_INTR_ID_ECC1_DBE		3
#define NIC_PF_INTR_ID_ECC2_SBE		4
#define NIC_PF_INTR_ID_ECC2_DBE		5
#define NIC_PF_INTR_ID_ECC3_SBE		6
#define NIC_PF_INTR_ID_ECC3_DBE		7
#define NIC_PF_INTR_ID_MBOX0		8
#define NIC_PF_INTR_ID_MBOX1		9

/* Minimum FIFO level before all packets for the CQ are dropped
 *
 * This value ensures that once a packet has been "accepted"
 * for reception it will not get dropped due to non-availability
 * of CQ descriptors. A HW erratum mandates this value to be
 * at least 0x100.
 */
#define NICPF_CQM_MIN_DROP_LEVEL       0x100

/* Global timer for CQ timer thresh interrupts
 * Calculated for SCLK of 700MHz
 * Value written should be 1/16th of what is expected
 *
 * 1 tick per 0.025usec
 */
#define NICPF_CLK_PER_INT_TICK		1
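
/* Worked numbers for the comment above (derived from the 700MHz figure, not
 * from the HW manual): one SCLK cycle is ~1.43ns, so 16 cycles take ~22.9ns,
 * i.e. roughly the 0.025usec per tick quoted above; a value of 1 therefore
 * corresponds to a single such tick.
 */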

/* Time to wait before we decide that an SQ is stuck.
 *
 * Since both pkt rx and tx notifications are done with the same CQ,
 * when packets are being received at a very high rate (e.g. L2 forwarding)
 * freeing of transmitted skbs will be delayed and the watchdog
 * will kick in, resetting the interface. Hence this value is kept high.
 */
#define	NICVF_TX_TIMEOUT		(50 * HZ)

struct nicvf_cq_poll {
	struct  nicvf *nicvf;
	u8	cq_idx;		/* Completion queue index */
	struct	napi_struct napi;
};

#define NIC_MAX_RSS_HASH_BITS		8
#define NIC_MAX_RSS_IDR_TBL_SIZE	(1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE		5 /* 320 bit key */

struct nicvf_rss_info {
	bool enable;
#define	RSS_L2_EXTENDED_HASH_ENA	BIT(0)
#define	RSS_IP_HASH_ENA			BIT(1)
#define	RSS_TCP_HASH_ENA		BIT(2)
#define	RSS_TCP_SYN_DIS			BIT(3)
#define	RSS_UDP_HASH_ENA		BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA	BIT(5)
#define	RSS_ROCE_ENA			BIT(6)
#define	RSS_L3_BI_DIRECTION_ENA		BIT(7)
#define	RSS_L4_BI_DIRECTION_ENA		BIT(8)
	u64 cfg;
	u8  hash_bits;
	u16 rss_size;
	u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
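
/* Minimal sketch (an illustrative assumption, not an existing driver helper):
 * with rss_size == 1 << hash_bits entries, the low bits of a flow's RSS hash
 * index ind_tbl[] to select the receive queue for that flow.
 */
static inline u8 nicvf_example_rss_rq(const struct nicvf_rss_info *rss,
				      u32 hash)
{
	return rss->ind_tbl[hash & (rss->rss_size - 1)];
}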

struct nicvf_pfc {
	u8    autoneg;
	u8    fc_rx;
	u8    fc_tx;
};

enum rx_stats_reg_offset {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_RED = 0x4,
	RX_RED_OCTS = 0x5,
	RX_ORUN = 0x6,
	RX_ORUN_OCTS = 0x7,
	RX_FCS = 0x8,
	RX_L2ERR = 0x9,
	RX_DRP_BCAST = 0xa,
	RX_DRP_MCAST = 0xb,
	RX_DRP_L3BCAST = 0xc,
	RX_DRP_L3MCAST = 0xd,
	RX_STATS_ENUM_LAST,
};

enum tx_stats_reg_offset {
	TX_OCTS = 0x0,
	TX_UCAST = 0x1,
	TX_BCAST = 0x2,
	TX_MCAST = 0x3,
	TX_DROP = 0x4,
	TX_STATS_ENUM_LAST,
};

struct nicvf_hw_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;
	u64 rx_drop_red;
	u64 rx_drop_red_bytes;
	u64 rx_drop_overrun;
	u64 rx_drop_overrun_bytes;
	u64 rx_drop_bcast;
	u64 rx_drop_mcast;
	u64 rx_drop_l3_bcast;
	u64 rx_drop_l3_mcast;
	u64 rx_fcs_errors;
	u64 rx_l2_errors;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

struct nicvf_drv_stats {
	/* CQE Rx errs */
	u64 rx_bgx_truncated_pkts;
	u64 rx_jabber_errs;
	u64 rx_fcs_errs;
	u64 rx_bgx_errs;
	u64 rx_prel2_errs;
	u64 rx_l2_hdr_malformed;
	u64 rx_oversize;
	u64 rx_undersize;
	u64 rx_l2_len_mismatch;
	u64 rx_l2_pclp;
	u64 rx_ip_ver_errs;
	u64 rx_ip_csum_errs;
	u64 rx_ip_hdr_malformed;
	u64 rx_ip_payload_malformed;
	u64 rx_ip_ttl_errs;
	u64 rx_l3_pclp;
	u64 rx_l4_malformed;
	u64 rx_l4_csum_errs;
	u64 rx_udp_len_errs;
	u64 rx_l4_port_errs;
	u64 rx_tcp_flag_errs;
	u64 rx_tcp_offset_errs;
	u64 rx_l4_pclp;
	u64 rx_truncated_pkts;

	/* CQE Tx errs */
	u64 tx_desc_fault;
	u64 tx_hdr_cons_err;
	u64 tx_subdesc_err;
	u64 tx_max_size_exceeded;
	u64 tx_imm_size_oflow;
	u64 tx_data_seq_err;
	u64 tx_mem_seq_err;
	u64 tx_lock_viol;
	u64 tx_data_fault;
	u64 tx_tstmp_conflict;
	u64 tx_tstmp_timeout;
	u64 tx_mem_fault;
	u64 tx_csum_overlap;
	u64 tx_csum_overflow;

	/* driver debug stats */
	u64 tx_tso;
	u64 tx_timeout;
	u64 txq_stop;
	u64 txq_wake;

	u64 rcv_buffer_alloc_failures;
	u64 page_alloc;

	struct u64_stats_sync   syncp;
};

struct nicvf {
	struct nicvf		*pnicvf;
	struct net_device	*netdev;
	struct pci_dev		*pdev;
	void __iomem		*reg_base;
	struct bpf_prog         *xdp_prog;
#define	MAX_QUEUES_PER_QSET			8
	struct queue_set	*qs;
	void			*iommu_domain;
	u8			vf_id;
	u8			sqs_id;
	bool                    sqs_mode;
	bool			hw_tso;
	bool			t88;

	/* Receive buffer alloc */
	u32			rb_page_offset;
	u16			rb_pageref;
	bool			rb_alloc_fail;
	bool			rb_work_scheduled;
	struct page		*rb_page;
	struct delayed_work	rbdr_work;
	struct tasklet_struct	rbdr_task;

	/* Secondary Qset */
	u8			sqs_count;
#define	MAX_SQS_PER_VF_SINGLE_NODE		5
#define	MAX_SQS_PER_VF				11
	struct nicvf		*snicvf[MAX_SQS_PER_VF];

	/* Queue count */
	u8			rx_queues;
	u8			tx_queues;
	u8			xdp_tx_queues;
	u8			max_queues;

	u8			node;
	u8			cpi_alg;
	bool			link_up;
	u8			mac_type;
	u8			duplex;
	u32			speed;
	bool			tns_mode;
	bool			loopback_supported;
	struct nicvf_rss_info	rss_info;
	struct nicvf_pfc	pfc;
	struct tasklet_struct	qs_err_task;
	struct work_struct	reset_task;

	/* Interrupt coalescing settings */
	u32			cq_coalesce_usecs;
	u32			msg_enable;

	/* Stats */
	struct nicvf_hw_stats   hw_stats;
	struct nicvf_drv_stats  __percpu *drv_stats;
	struct bgx_stats	bgx_stats;

	/* Napi */
	struct nicvf_cq_poll	*napi[8];

	/* MSI-X  */
	u8			num_vec;
	char			irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
	bool			irq_allocated[NIC_VF_MSIX_VECTORS];
	cpumask_var_t		affinity_mask[NIC_VF_MSIX_VECTORS];

	/* VF <-> PF mailbox communication */
	bool			pf_acked;
	bool			pf_nacked;
	bool			set_mac_pending;
} ____cacheline_aligned_in_smp;

/* PF <--> VF Mailbox communication
 * Eight 64bit registers are shared between PF and VF.
 * Separate set for each VF.
 * Writing '1' into the last register (mbx7) marks the end of a message.
 */

/* PF <--> VF mailbox communication */
#define	NIC_PF_VF_MAILBOX_SIZE		2
#define	NIC_MBOX_MSG_TIMEOUT		2000 /* ms */

/* Mailbox message types */
#define	NIC_MBOX_MSG_READY		0x01	/* Is PF ready to rcv msgs */
#define	NIC_MBOX_MSG_ACK		0x02	/* ACK the message received */
#define	NIC_MBOX_MSG_NACK		0x03	/* NACK the message received */
#define	NIC_MBOX_MSG_QS_CFG		0x04	/* Configure Qset */
#define	NIC_MBOX_MSG_RQ_CFG		0x05	/* Configure receive queue */
#define	NIC_MBOX_MSG_SQ_CFG		0x06	/* Configure Send queue */
#define	NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure receive queue */
#define	NIC_MBOX_MSG_SET_MAC		0x08	/* Add MAC ID to DMAC filter */
#define	NIC_MBOX_MSG_SET_MAX_FRS	0x09	/* Set max frame size */
#define	NIC_MBOX_MSG_CPI_CFG		0x0A	/* Config CPI, RSSI */
#define	NIC_MBOX_MSG_RSS_SIZE		0x0B	/* Get RSS indir_tbl size */
#define	NIC_MBOX_MSG_RSS_CFG		0x0C	/* Config RSS table */
#define	NIC_MBOX_MSG_RSS_CFG_CONT	0x0D	/* RSS config continuation */
#define	NIC_MBOX_MSG_RQ_BP_CFG		0x0E	/* RQ backpressure config */
#define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
#define	NIC_MBOX_MSG_BGX_STATS		0x10	/* Get stats from BGX */
#define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
#define	NIC_MBOX_MSG_NICVF_PTR		0x13	/* Send nicvf ptr to PF */
#define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
#define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqs nicvf ptr to primary VF */
#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
#define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
#define	NIC_MBOX_MSG_PFC		0x18	/* Pause frame control */
#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */

struct nic_cfg_msg {
	u8    msg;
	u8    vf_id;
	u8    node_id;
	u8    tns_mode:1;
	u8    sqs_mode:1;
	u8    loopback_supported:1;
	u8    mac_addr[ETH_ALEN];
};

/* Qset configuration */
struct qs_cfg_msg {
	u8    msg;
	u8    num;
	u8    sqs_count;
	u64   cfg;
};

/* Receive queue configuration */
struct rq_cfg_msg {
	u8    msg;
	u8    qs_num;
	u8    rq_num;
	u64   cfg;
};

/* Send queue configuration */
struct sq_cfg_msg {
	u8    msg;
	u8    qs_num;
	u8    sq_num;
	bool  sqs_mode;
	u64   cfg;
};

/* Set VF's MAC address */
struct set_mac_msg {
	u8    msg;
	u8    vf_id;
	u8    mac_addr[ETH_ALEN];
};

/* Set Maximum frame size */
struct set_frs_msg {
	u8    msg;
	u8    vf_id;
	u16   max_frs;
};

/* Set CPI algorithm type */
struct cpi_cfg_msg {
	u8    msg;
	u8    vf_id;
	u8    rq_cnt;
	u8    cpi_alg;
};

/* Get RSS table size */
struct rss_sz_msg {
	u8    msg;
	u8    vf_id;
	u16   ind_tbl_size;
};

/* Set RSS configuration */
struct rss_cfg_msg {
	u8    msg;
	u8    vf_id;
	u8    hash_bits;
	u8    tbl_len;
	u8    tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG	8
	u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
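
/* A full indirection table does not fit in one mailbox message, so it is
 * transferred in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first
 * chunk as NIC_MBOX_MSG_RSS_CFG and subsequent ones as
 * NIC_MBOX_MSG_RSS_CFG_CONT, with tbl_offset advancing by the chunk length
 * (see nicvf_config_rss() declared below).
 */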

struct bgx_stats_msg {
	u8    msg;
	u8    vf_id;
	u8    rx;
	u8    idx;
	u64   stats;
};

/* Physical interface link status */
struct bgx_link_status {
	u8    msg;
	u8    mac_type;
	u8    link_up;
	u8    duplex;
	u32   speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
	u8    msg;
	u8    vf_id;
	u8    qs_count;
};

struct nicvf_ptr {
	u8    msg;
	u8    vf_id;
	bool  sqs_mode;
	u8    sqs_id;
	u64   nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
	u8    msg;
	u8    vf_id;
	bool  enable;
};

/* Reset statistics counters */
struct reset_stat_cfg {
	u8    msg;
	/* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
	u16   rx_stat_mask;
	/* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
	u8    tx_stat_mask;
	/* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
	 * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
	 * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
	 * ..
	 * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
	 * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
	 */
	u16   rq_stat_mask;
	/* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
	 * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
	 * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
	 * ..
	 * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
	 * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
	 */
	u16   sq_stat_mask;
};
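
/* Example (illustrative): clearing all 14 RX counters plus both counters of
 * RQ0 only would use rx_stat_mask = 0x3fff, rq_stat_mask = 0x3 and the other
 * masks set to 0.
 */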

struct pfc {
	u8    msg;
	u8    get; /* Get or set PFC settings */
	u8    autoneg;
	u8    fc_rx;
	u8    fc_tx;
};

/* 128 bit shared memory between PF and each VF */
union nic_mbx {
	struct { u8 msg; }	msg;
	struct nic_cfg_msg	nic_cfg;
	struct qs_cfg_msg	qs;
	struct rq_cfg_msg	rq;
	struct sq_cfg_msg	sq;
	struct set_mac_msg	mac;
	struct set_frs_msg	frs;
	struct cpi_cfg_msg	cpi_cfg;
	struct rss_sz_msg	rss_size;
	struct rss_cfg_msg	rss_cfg;
	struct bgx_stats_msg    bgx_stats;
	struct bgx_link_status  link_status;
	struct sqs_alloc        sqs_alloc;
	struct nicvf_ptr	nicvf;
	struct set_loopback	lbk;
	struct reset_stat_cfg	reset_stat;
	struct pfc		pfc;
};

#define NIC_NODE_ID_MASK	0x03
#define NIC_NODE_ID_SHIFT	44

static inline int nic_get_node_id(struct pci_dev *pdev)
{
	u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
	return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}

static inline bool pass1_silicon(struct pci_dev *pdev)
{
	return (pdev->revision < 8) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

static inline bool pass2_silicon(struct pci_dev *pdev)
{
	return (pdev->revision >= 8) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);
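
/* Minimal usage sketch (illustrative only, not part of the original header):
 * a VF asking the PF for the RSS indirection table size over the mailbox,
 * using the message layouts and the prototypes declared above.
 */
static inline int nicvf_example_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;

	/* nicvf_send_msg_to_pf() handles waiting for the PF's ACK/NACK */
	return nicvf_send_msg_to_pf(nic, &mbx);
}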

#endif /* NIC_H */