提交 793768f5 编写于 作者: David S. Miller

Merge branch 'thunderx-features-fixes'

Aleksey Makarov says:

====================
net: thunderx: New features and fixes

v2:
  - The unused affinity_mask field of the structure cmp_queue
  has been deleted. (thanks to David Miller)
  - The unneeded initializers have been dropped. (thanks to Alexey Klimov)
  - The commit message "net: thunderx: Rework interrupt handling"
  has been fixed. (thanks to Alexey Klimov)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -928,7 +928,7 @@ M: Sunil Goutham <sgoutham@cavium.com> ...@@ -928,7 +928,7 @@ M: Sunil Goutham <sgoutham@cavium.com>
M: Robert Richter <rric@kernel.org> M: Robert Richter <rric@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported S: Supported
F: drivers/net/ethernet/cavium/ F: drivers/net/ethernet/cavium/thunder/
ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
M: Alexander Shiyan <shc_work@mail.ru> M: Alexander Shiyan <shc_work@mail.ru>
...@@ -2543,7 +2543,6 @@ M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com> ...@@ -2543,7 +2543,6 @@ M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
W: http://www.cavium.com W: http://www.cavium.com
S: Supported S: Supported
F: drivers/net/ethernet/cavium/
F: drivers/net/ethernet/cavium/liquidio/ F: drivers/net/ethernet/cavium/liquidio/
CC2520 IEEE-802.15.4 RADIO DRIVER CC2520 IEEE-802.15.4 RADIO DRIVER
......
...@@ -135,6 +135,7 @@ ...@@ -135,6 +135,7 @@
#define NICVF_TX_TIMEOUT (50 * HZ) #define NICVF_TX_TIMEOUT (50 * HZ)
struct nicvf_cq_poll { struct nicvf_cq_poll {
struct nicvf *nicvf;
u8 cq_idx; /* Completion queue index */ u8 cq_idx; /* Completion queue index */
struct napi_struct napi; struct napi_struct napi;
}; };
...@@ -190,10 +191,10 @@ enum tx_stats_reg_offset { ...@@ -190,10 +191,10 @@ enum tx_stats_reg_offset {
}; };
struct nicvf_hw_stats { struct nicvf_hw_stats {
u64 rx_bytes_ok; u64 rx_bytes;
u64 rx_ucast_frames_ok; u64 rx_ucast_frames;
u64 rx_bcast_frames_ok; u64 rx_bcast_frames;
u64 rx_mcast_frames_ok; u64 rx_mcast_frames;
u64 rx_fcs_errors; u64 rx_fcs_errors;
u64 rx_l2_errors; u64 rx_l2_errors;
u64 rx_drop_red; u64 rx_drop_red;
...@@ -204,6 +205,31 @@ struct nicvf_hw_stats { ...@@ -204,6 +205,31 @@ struct nicvf_hw_stats {
u64 rx_drop_mcast; u64 rx_drop_mcast;
u64 rx_drop_l3_bcast; u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast; u64 rx_drop_l3_mcast;
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
u64 rx_bgx_errs;
u64 rx_prel2_errs;
u64 rx_l2_hdr_malformed;
u64 rx_oversize;
u64 rx_undersize;
u64 rx_l2_len_mismatch;
u64 rx_l2_pclp;
u64 rx_ip_ver_errs;
u64 rx_ip_csum_errs;
u64 rx_ip_hdr_malformed;
u64 rx_ip_payload_malformed;
u64 rx_ip_ttl_errs;
u64 rx_l3_pclp;
u64 rx_l4_malformed;
u64 rx_l4_csum_errs;
u64 rx_udp_len_errs;
u64 rx_l4_port_errs;
u64 rx_tcp_flag_errs;
u64 rx_tcp_offset_errs;
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
u64 tx_bytes_ok; u64 tx_bytes_ok;
u64 tx_ucast_frames_ok; u64 tx_ucast_frames_ok;
u64 tx_bcast_frames_ok; u64 tx_bcast_frames_ok;
...@@ -222,6 +248,7 @@ struct nicvf_drv_stats { ...@@ -222,6 +248,7 @@ struct nicvf_drv_stats {
u64 rx_frames_1518; u64 rx_frames_1518;
u64 rx_frames_jumbo; u64 rx_frames_jumbo;
u64 rx_drops; u64 rx_drops;
/* Tx */ /* Tx */
u64 tx_frames_ok; u64 tx_frames_ok;
u64 tx_drops; u64 tx_drops;
...@@ -231,13 +258,24 @@ struct nicvf_drv_stats { ...@@ -231,13 +258,24 @@ struct nicvf_drv_stats {
}; };
struct nicvf { struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev; struct net_device *netdev;
struct pci_dev *pdev; struct pci_dev *pdev;
u8 vf_id; u8 vf_id;
u8 node; u8 node;
u8 tns_mode; u8 tns_mode:1;
u8 sqs_mode:1;
u8 loopback_supported:1;
u16 mtu; u16 mtu;
struct queue_set *qs; struct queue_set *qs;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
u8 sqs_id;
u8 sqs_count; /* Secondary Qset count */
struct nicvf *snicvf[MAX_SQS_PER_VF];
u8 rx_queues;
u8 tx_queues;
u8 max_queues;
void __iomem *reg_base; void __iomem *reg_base;
bool link_up; bool link_up;
u8 duplex; u8 duplex;
...@@ -257,7 +295,7 @@ struct nicvf { ...@@ -257,7 +295,7 @@ struct nicvf {
u32 cq_coalesce_usecs; u32 cq_coalesce_usecs;
u32 msg_enable; u32 msg_enable;
struct nicvf_hw_stats stats; struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats; struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats; struct bgx_stats bgx_stats;
struct work_struct reset_task; struct work_struct reset_task;
...@@ -269,10 +307,9 @@ struct nicvf { ...@@ -269,10 +307,9 @@ struct nicvf {
char irq_name[NIC_VF_MSIX_VECTORS][20]; char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS]; bool irq_allocated[NIC_VF_MSIX_VECTORS];
bool pf_ready_to_rcv_msg; /* VF <-> PF mailbox communication */
bool pf_acked; bool pf_acked;
bool pf_nacked; bool pf_nacked;
bool bgx_stats_acked;
bool set_mac_pending; bool set_mac_pending;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
...@@ -304,14 +341,21 @@ struct nicvf { ...@@ -304,14 +341,21 @@ struct nicvf {
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ #define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ #define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ #define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */ #define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */ #define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
struct nic_cfg_msg { struct nic_cfg_msg {
u8 msg; u8 msg;
u8 vf_id; u8 vf_id;
u8 tns_mode;
u8 node_id; u8 node_id;
u8 tns_mode:1;
u8 sqs_mode:1;
u8 loopback_supported:1;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
}; };
...@@ -319,6 +363,7 @@ struct nic_cfg_msg { ...@@ -319,6 +363,7 @@ struct nic_cfg_msg {
struct qs_cfg_msg { struct qs_cfg_msg {
u8 msg; u8 msg;
u8 num; u8 num;
u8 sqs_count;
u64 cfg; u64 cfg;
}; };
...@@ -335,6 +380,7 @@ struct sq_cfg_msg { ...@@ -335,6 +380,7 @@ struct sq_cfg_msg {
u8 msg; u8 msg;
u8 qs_num; u8 qs_num;
u8 sq_num; u8 sq_num;
bool sqs_mode;
u64 cfg; u64 cfg;
}; };
...@@ -394,6 +440,28 @@ struct bgx_link_status { ...@@ -394,6 +440,28 @@ struct bgx_link_status {
u32 speed; u32 speed;
}; };
/* Get Extra Qset IDs
 * VF -> PF mailbox request asking the PF to assign secondary Qsets to
 * this VF; the PF replies with the same message type carrying the
 * number of Qsets it actually granted (see nic_alloc_sqs()).
 */
struct sqs_alloc {
u8 msg;    /* NIC_MBOX_MSG_ALLOC_SQS */
u8 vf_id;  /* VF requesting the secondary Qsets */
u8 qs_count; /* Qsets requested (VF->PF) / granted (PF->VF reply) */
};
/* Exchange nicvf struct pointers between PF and VFs so that a primary
 * Qset's VF and its secondary Qset VFs can find each other
 * (NIC_MBOX_MSG_NICVF_PTR / PNICVF_PTR / SNICVF_PTR).
 */
struct nicvf_ptr {
u8 msg;
u8 vf_id;
bool sqs_mode; /* presumably true when sender is a secondary-Qset VF — confirm */
u8 sqs_id;     /* Index of the secondary Qset within the primary VF */
u64 nicvf;     /* Kernel address of the nicvf struct, carried as u64 */
};
/* Set interface in loopback mode
 * VF -> PF request (NIC_MBOX_MSG_LOOPBACK) to enable/disable BGX
 * internal loopback on the LMAC serving this VF.
 */
struct set_loopback {
u8 msg;
u8 vf_id;    /* VF whose LMAC is to be (un)looped */
bool enable; /* true = enable loopback, false = disable */
};
/* 128 bit shared memory between PF and each VF */ /* 128 bit shared memory between PF and each VF */
union nic_mbx { union nic_mbx {
struct { u8 msg; } msg; struct { u8 msg; } msg;
...@@ -408,6 +476,9 @@ union nic_mbx { ...@@ -408,6 +476,9 @@ union nic_mbx {
struct rss_cfg_msg rss_cfg; struct rss_cfg_msg rss_cfg;
struct bgx_stats_msg bgx_stats; struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status; struct bgx_link_status link_status;
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
struct set_loopback lbk;
}; };
#define NIC_NODE_ID_MASK 0x03 #define NIC_NODE_ID_MASK 0x03
......
...@@ -28,6 +28,11 @@ struct nicpf { ...@@ -28,6 +28,11 @@ struct nicpf {
u8 num_vf_en; /* No of VF enabled */ u8 num_vf_en; /* No of VF enabled */
bool vf_enabled[MAX_NUM_VFS_SUPPORTED]; bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
void __iomem *reg_base; /* Register start address */ void __iomem *reg_base; /* Register start address */
u8 num_sqs_en; /* Secondary qsets enabled */
u64 nicvf[MAX_NUM_VFS_SUPPORTED];
u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
bool sqs_used[MAX_NUM_VFS_SUPPORTED];
struct pkind_cfg pkind; struct pkind_cfg pkind;
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
...@@ -139,14 +144,19 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf) ...@@ -139,14 +144,19 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); if (vf < MAX_LMAC) {
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
if (mac)
ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
if (mac)
ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
}
mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
mbx.nic_cfg.node_id = nic->node; mbx.nic_cfg.node_id = nic->node;
mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
nic_send_msg_to_vf(nic, vf, &mbx); nic_send_msg_to_vf(nic, vf, &mbx);
} }
...@@ -329,6 +339,10 @@ static void nic_init_hw(struct nicpf *nic) ...@@ -329,6 +339,10 @@ static void nic_init_hw(struct nicpf *nic)
/* Timer config */ /* Timer config */
nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK); nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
/* Enable VLAN ethertype matching and stripping */
nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
(2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
} }
/* Channel parse index configuration */ /* Channel parse index configuration */
...@@ -429,6 +443,12 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) ...@@ -429,6 +443,12 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
qset = cfg->vf_id; qset = cfg->vf_id;
for (; rssi < (rssi_base + cfg->tbl_len); rssi++) { for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
u8 svf = cfg->ind_tbl[idx] >> 3;
if (svf)
qset = nic->vf_sqs[cfg->vf_id][svf - 1];
else
qset = cfg->vf_id;
nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
(qset << 3) | (cfg->ind_tbl[idx] & 0x7)); (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
idx++; idx++;
...@@ -452,19 +472,31 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) ...@@ -452,19 +472,31 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
* VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
* VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
*/ */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
struct sq_cfg_msg *sq)
{ {
u32 bgx, lmac, chan; u32 bgx, lmac, chan;
u32 tl2, tl3, tl4; u32 tl2, tl3, tl4;
u32 rr_quantum; u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
else
pqs_vnic = vnic;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
/* 24 bytes for FCS, IPG and preamble */ /* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
tl4 += sq_idx; tl4 += sq_idx;
if (sq->sqs_mode)
tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) | ((u64)vnic << NIC_QS_ID_SHIFT) |
...@@ -485,6 +517,86 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) ...@@ -485,6 +517,86 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
} }
/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};
	int primary_vf = nic->pqs_vf[sqs];

	/* Reply with the nicvf pointer that the owning primary VF
	 * registered earlier via NIC_MBOX_MSG_NICVF_PTR.
	 */
	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[primary_vf];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}
/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *req)
{
	union nic_mbx mbx = {};
	/* Resolve which VF backs the requested secondary Qset */
	int svf = nic->vf_sqs[req->vf_id][req->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = req->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[svf];
	nic_send_msg_to_vf(nic, req->vf_id, &mbx);
}
/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int idx;

	for (idx = 0; idx < nic->num_sqs_en; idx++) {
		if (nic->sqs_used[idx])
			continue;
		/* Claim this slot; secondary Qsets are numbered
		 * after the primary VFs.
		 */
		nic->sqs_used[idx] = true;
		return idx + nic->num_vf_en;
	}

	return -1; /* none free */
}
/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int granted = 0;

	if (nic->num_sqs_en) {
		int idx;

		for (idx = 0; idx < sqs->qs_count; idx++) {
			int qset = nic_nxt_avail_sqs(nic);

			if (qset < 0)
				break;
			/* Record the mapping in both directions */
			nic->vf_sqs[sqs->vf_id][idx] = qset;
			nic->pqs_vf[qset] = sqs->vf_id;
			granted++;
		}
	}

	/* Always reply, even with zero Qsets granted */
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = granted;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}
/* Enable or disable BGX internal loopback on the LMAC serving the
 * given VF (NIC_MBOX_MSG_LOOPBACK handler).
 * Returns 0 on success, -1 if the VF has no LMAC mapping.
 */
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	/* Only primary VFs 0..MAX_LMAC-1 are mapped to an LMAC (see
	 * nic_mbx_send_ready()); vf_id == MAX_LMAC would already index
	 * one past vf_lmac_map[], hence ">=" rather than ">".
	 */
	if (lbk->vf_id >= MAX_LMAC)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}
/* Interrupt handler to handle mailbox messages from VFs */ /* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf) static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{ {
...@@ -492,6 +604,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) ...@@ -492,6 +604,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
u64 *mbx_data; u64 *mbx_data;
u64 mbx_addr; u64 mbx_addr;
u64 reg_addr; u64 reg_addr;
u64 cfg;
int bgx, lmac; int bgx, lmac;
int i; int i;
int ret = 0; int ret = 0;
...@@ -512,15 +625,24 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) ...@@ -512,15 +625,24 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
switch (mbx.msg.msg) { switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY: case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf); nic_mbx_send_ready(nic, vf);
nic->link[vf] = 0; if (vf < MAX_LMAC) {
nic->duplex[vf] = 0; nic->link[vf] = 0;
nic->speed[vf] = 0; nic->duplex[vf] = 0;
nic->speed[vf] = 0;
}
ret = 1; ret = 1;
break; break;
case NIC_MBOX_MSG_QS_CFG: case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG | reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT); (mbx.qs.num << NIC_QS_ID_SHIFT);
nic_reg_write(nic, reg_addr, mbx.qs.cfg); cfg = mbx.qs.cfg;
/* Check if its a secondary Qset */
if (vf >= nic->num_vf_en) {
cfg = cfg & (~0x7FULL);
/* Assign this Qset to primary Qset's VF */
cfg |= nic->pqs_vf[vf];
}
nic_reg_write(nic, reg_addr, cfg);
break; break;
case NIC_MBOX_MSG_RQ_CFG: case NIC_MBOX_MSG_RQ_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG | reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
...@@ -548,9 +670,11 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) ...@@ -548,9 +670,11 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
(mbx.sq.qs_num << NIC_QS_ID_SHIFT) | (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.sq.sq_num << NIC_Q_NUM_SHIFT); (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.sq.cfg); nic_reg_write(nic, reg_addr, mbx.sq.cfg);
nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num); nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
break; break;
case NIC_MBOX_MSG_SET_MAC: case NIC_MBOX_MSG_SET_MAC:
if (vf >= nic->num_vf_en)
break;
lmac = mbx.mac.vf_id; lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
...@@ -577,10 +701,28 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) ...@@ -577,10 +701,28 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_SHUTDOWN: case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */ /* First msg in VF teardown sequence */
nic->vf_enabled[vf] = false; nic->vf_enabled[vf] = false;
if (vf >= nic->num_vf_en)
nic->sqs_used[vf - nic->num_vf_en] = false;
nic->pqs_vf[vf] = 0;
break; break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
goto unlock;
case NIC_MBOX_MSG_NICVF_PTR:
nic->nicvf[vf] = mbx.nicvf.nicvf;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
nic_send_pnicvf(nic, vf);
goto unlock;
case NIC_MBOX_MSG_SNICVF_PTR:
nic_send_snicvf(nic, &mbx.nicvf);
goto unlock;
case NIC_MBOX_MSG_BGX_STATS: case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats); nic_get_bgx_stats(nic, &mbx.bgx_stats);
goto unlock; goto unlock;
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
default: default:
dev_err(&nic->pdev->dev, dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
...@@ -606,8 +748,7 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) ...@@ -606,8 +748,7 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
if (intr & (1ULL << vf)) { if (intr & (1ULL << vf)) {
dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
vf + (mbx * vf_per_mbx_reg)); vf + (mbx * vf_per_mbx_reg));
if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
break;
nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
nic_clear_mbx_intr(nic, vf, mbx); nic_clear_mbx_intr(nic, vf, mbx);
} }
...@@ -713,9 +854,24 @@ static void nic_unregister_interrupts(struct nicpf *nic) ...@@ -713,9 +854,24 @@ static void nic_unregister_interrupts(struct nicpf *nic)
nic_disable_msix(nic); nic_disable_msix(nic);
} }
/* Compute how many secondary Qsets to enable, bounded by the per-VF
 * quota and by the total VFs the device's SR-IOV capability advertises.
 */
static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	u16 total_vf;
	int sriov_pos;
	/* Multi-node systems get the larger per-VF quota */
	int sqs_per_vf = (nr_node_ids > 1) ? MAX_SQS_PER_VF :
					     MAX_SQS_PER_VF_SINGLE_NODE;

	sriov_pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, sriov_pos + PCI_SRIOV_TOTAL_VF,
			     &total_vf);

	/* Cannot hand out more Qsets than spare VFs exist */
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{ {
int pos = 0; int pos = 0;
int vf_en;
int err; int err;
u16 total_vf_cnt; u16 total_vf_cnt;
...@@ -732,16 +888,20 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) ...@@ -732,16 +888,20 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
if (!total_vf_cnt) if (!total_vf_cnt)
return 0; return 0;
err = pci_enable_sriov(pdev, nic->num_vf_en); vf_en = nic->num_vf_en;
nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
vf_en += nic->num_sqs_en;
err = pci_enable_sriov(pdev, vf_en);
if (err) { if (err) {
dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
nic->num_vf_en); vf_en);
nic->num_vf_en = 0; nic->num_vf_en = 0;
return err; return err;
} }
dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
nic->num_vf_en); vf_en);
nic->flags |= NIC_SRIOV_ENABLED; nic->flags |= NIC_SRIOV_ENABLED;
return 0; return 0;
......
...@@ -35,10 +35,10 @@ struct nicvf_stat { ...@@ -35,10 +35,10 @@ struct nicvf_stat {
} }
static const struct nicvf_stat nicvf_hw_stats[] = { static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_bytes_ok), NICVF_HW_STAT(rx_bytes),
NICVF_HW_STAT(rx_ucast_frames_ok), NICVF_HW_STAT(rx_ucast_frames),
NICVF_HW_STAT(rx_bcast_frames_ok), NICVF_HW_STAT(rx_bcast_frames),
NICVF_HW_STAT(rx_mcast_frames_ok), NICVF_HW_STAT(rx_mcast_frames),
NICVF_HW_STAT(rx_fcs_errors), NICVF_HW_STAT(rx_fcs_errors),
NICVF_HW_STAT(rx_l2_errors), NICVF_HW_STAT(rx_l2_errors),
NICVF_HW_STAT(rx_drop_red), NICVF_HW_STAT(rx_drop_red),
...@@ -49,6 +49,30 @@ static const struct nicvf_stat nicvf_hw_stats[] = { ...@@ -49,6 +49,30 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_drop_mcast), NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast), NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast), NICVF_HW_STAT(rx_drop_l3_mcast),
NICVF_HW_STAT(rx_bgx_truncated_pkts),
NICVF_HW_STAT(rx_jabber_errs),
NICVF_HW_STAT(rx_fcs_errs),
NICVF_HW_STAT(rx_bgx_errs),
NICVF_HW_STAT(rx_prel2_errs),
NICVF_HW_STAT(rx_l2_hdr_malformed),
NICVF_HW_STAT(rx_oversize),
NICVF_HW_STAT(rx_undersize),
NICVF_HW_STAT(rx_l2_len_mismatch),
NICVF_HW_STAT(rx_l2_pclp),
NICVF_HW_STAT(rx_ip_ver_errs),
NICVF_HW_STAT(rx_ip_csum_errs),
NICVF_HW_STAT(rx_ip_hdr_malformed),
NICVF_HW_STAT(rx_ip_payload_malformed),
NICVF_HW_STAT(rx_ip_ttl_errs),
NICVF_HW_STAT(rx_l3_pclp),
NICVF_HW_STAT(rx_l4_malformed),
NICVF_HW_STAT(rx_l4_csum_errs),
NICVF_HW_STAT(rx_udp_len_errs),
NICVF_HW_STAT(rx_l4_port_errs),
NICVF_HW_STAT(rx_tcp_flag_errs),
NICVF_HW_STAT(rx_tcp_offset_errs),
NICVF_HW_STAT(rx_l4_pclp),
NICVF_HW_STAT(rx_truncated_pkts),
NICVF_HW_STAT(tx_bytes_ok), NICVF_HW_STAT(tx_bytes_ok),
NICVF_HW_STAT(tx_ucast_frames_ok), NICVF_HW_STAT(tx_ucast_frames_ok),
NICVF_HW_STAT(tx_bcast_frames_ok), NICVF_HW_STAT(tx_bcast_frames_ok),
...@@ -125,10 +149,33 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl) ...@@ -125,10 +149,33 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
nic->msg_enable = lvl; nic->msg_enable = lvl;
} }
/* Emit ethtool stat-name strings ("rxqN: ...", "txqN: ...") for one
 * Qset, advancing *data by ETH_GSTRING_LEN per string.
 * @qset: Qset index; offsets the queue numbers so that secondary-Qset
 *        queues are numbered after the primary's.
 */
static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
	int stats, qidx;
	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			/* snprintf: each string slot is exactly
			 * ETH_GSTRING_LEN bytes, don't overrun it.
			 */
			snprintf(*data, ETH_GSTRING_LEN, "rxq%d: %s",
				 qidx + start_qidx,
				 nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			snprintf(*data, ETH_GSTRING_LEN, "txq%d: %s",
				 qidx + start_qidx,
				 nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}
static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{ {
struct nicvf *nic = netdev_priv(netdev); struct nicvf *nic = netdev_priv(netdev);
int stats, qidx; int stats;
int sqs;
if (sset != ETH_SS_STATS) if (sset != ETH_SS_STATS)
return; return;
...@@ -143,20 +190,12 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -143,20 +190,12 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
data += ETH_GSTRING_LEN; data += ETH_GSTRING_LEN;
} }
for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { nicvf_get_qset_strings(nic, &data, 0);
for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
sprintf(data, "rxq%d: %s", qidx,
nicvf_queue_stats[stats].name);
data += ETH_GSTRING_LEN;
}
}
for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { for (sqs = 0; sqs < nic->sqs_count; sqs++) {
for (stats = 0; stats < nicvf_n_queue_stats; stats++) { if (!nic->snicvf[sqs])
sprintf(data, "txq%d: %s", qidx, continue;
nicvf_queue_stats[stats].name); nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
data += ETH_GSTRING_LEN;
}
} }
for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) { for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
...@@ -173,21 +212,58 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -173,21 +212,58 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static int nicvf_get_sset_count(struct net_device *netdev, int sset) static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{ {
struct nicvf *nic = netdev_priv(netdev); struct nicvf *nic = netdev_priv(netdev);
int qstats_count;
int sqs;
if (sset != ETH_SS_STATS) if (sset != ETH_SS_STATS)
return -EINVAL; return -EINVAL;
qstats_count = nicvf_n_queue_stats *
(nic->qs->rq_cnt + nic->qs->sq_cnt);
for (sqs = 0; sqs < nic->sqs_count; sqs++) {
struct nicvf *snic;
snic = nic->snicvf[sqs];
if (!snic)
continue;
qstats_count += nicvf_n_queue_stats *
(snic->qs->rq_cnt + snic->qs->sq_cnt);
}
return nicvf_n_hw_stats + nicvf_n_drv_stats + return nicvf_n_hw_stats + nicvf_n_drv_stats +
(nicvf_n_queue_stats * qstats_count +
(nic->qs->rq_cnt + nic->qs->sq_cnt)) +
BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
} }
/* Copy per-queue stat values for one Qset into the ethtool data
 * buffer, refreshing each queue's counters first.  *data is advanced
 * past everything written.  Tolerates a NULL nic (secondary Qset not
 * yet attached).
 */
static void nicvf_get_qset_stats(struct nicvf *nic,
				 struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!nic)
		return;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		const u64 *rq_stats;

		nicvf_update_rq_stats(nic, qidx);
		rq_stats = (u64 *)&nic->qs->rq[qidx].stats;
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = rq_stats[nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		const u64 *sq_stats;

		nicvf_update_sq_stats(nic, qidx);
		sq_stats = (u64 *)&nic->qs->sq[qidx].stats;
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = sq_stats[nicvf_queue_stats[stat].index];
	}
}
static void nicvf_get_ethtool_stats(struct net_device *netdev, static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data) struct ethtool_stats *stats, u64 *data)
{ {
struct nicvf *nic = netdev_priv(netdev); struct nicvf *nic = netdev_priv(netdev);
int stat, qidx; int stat;
int sqs;
nicvf_update_stats(nic); nicvf_update_stats(nic);
...@@ -195,22 +271,18 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, ...@@ -195,22 +271,18 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
nicvf_update_lmac_stats(nic); nicvf_update_lmac_stats(nic);
for (stat = 0; stat < nicvf_n_hw_stats; stat++) for (stat = 0; stat < nicvf_n_hw_stats; stat++)
*(data++) = ((u64 *)&nic->stats) *(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index]; [nicvf_hw_stats[stat].index];
for (stat = 0; stat < nicvf_n_drv_stats; stat++) for (stat = 0; stat < nicvf_n_drv_stats; stat++)
*(data++) = ((u64 *)&nic->drv_stats) *(data++) = ((u64 *)&nic->drv_stats)
[nicvf_drv_stats[stat].index]; [nicvf_drv_stats[stat].index];
for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { nicvf_get_qset_stats(nic, stats, &data);
for (stat = 0; stat < nicvf_n_queue_stats; stat++)
*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
[nicvf_queue_stats[stat].index];
}
for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { for (sqs = 0; sqs < nic->sqs_count; sqs++) {
for (stat = 0; stat < nicvf_n_queue_stats; stat++) if (!nic->snicvf[sqs])
*(data++) = ((u64 *)&nic->qs->sq[qidx].stats) continue;
[nicvf_queue_stats[stat].index]; nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
} }
for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++) for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
...@@ -369,7 +441,7 @@ static int nicvf_get_rxnfc(struct net_device *dev, ...@@ -369,7 +441,7 @@ static int nicvf_get_rxnfc(struct net_device *dev,
switch (info->cmd) { switch (info->cmd) {
case ETHTOOL_GRXRINGS: case ETHTOOL_GRXRINGS:
info->data = nic->qs->rq_cnt; info->data = nic->rx_queues;
ret = 0; ret = 0;
break; break;
case ETHTOOL_GRXFH: case ETHTOOL_GRXFH:
...@@ -501,17 +573,15 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, ...@@ -501,17 +573,15 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
struct nicvf_rss_info *rss = &nic->rss_info; struct nicvf_rss_info *rss = &nic->rss_info;
int idx; int idx;
if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
rss->enable = false;
rss->hash_bits = 0;
return -EIO;
}
/* We do not allow change in unsupported parameters */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP; return -EOPNOTSUPP;
rss->enable = true; if (!rss->enable) {
netdev_err(nic->netdev,
"RSS is disabled, cannot change settings\n");
return -EIO;
}
if (indir) { if (indir) {
for (idx = 0; idx < rss->rss_size; idx++) for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = indir[idx]; rss->ind_tbl[idx] = indir[idx];
...@@ -534,11 +604,11 @@ static void nicvf_get_channels(struct net_device *dev, ...@@ -534,11 +604,11 @@ static void nicvf_get_channels(struct net_device *dev,
memset(channel, 0, sizeof(*channel)); memset(channel, 0, sizeof(*channel));
channel->max_rx = MAX_RCV_QUEUES_PER_QS; channel->max_rx = nic->max_queues;
channel->max_tx = MAX_SND_QUEUES_PER_QS; channel->max_tx = nic->max_queues;
channel->rx_count = nic->qs->rq_cnt; channel->rx_count = nic->rx_queues;
channel->tx_count = nic->qs->sq_cnt; channel->tx_count = nic->tx_queues;
} }
/* Set no of Tx, Rx queues to be used */ /* Set no of Tx, Rx queues to be used */
...@@ -548,22 +618,34 @@ static int nicvf_set_channels(struct net_device *dev, ...@@ -548,22 +618,34 @@ static int nicvf_set_channels(struct net_device *dev,
struct nicvf *nic = netdev_priv(dev); struct nicvf *nic = netdev_priv(dev);
int err = 0; int err = 0;
bool if_up = netif_running(dev); bool if_up = netif_running(dev);
int cqcount;
if (!channel->rx_count || !channel->tx_count) if (!channel->rx_count || !channel->tx_count)
return -EINVAL; return -EINVAL;
if (channel->rx_count > MAX_RCV_QUEUES_PER_QS) if (channel->rx_count > nic->max_queues)
return -EINVAL; return -EINVAL;
if (channel->tx_count > MAX_SND_QUEUES_PER_QS) if (channel->tx_count > nic->max_queues)
return -EINVAL; return -EINVAL;
if (if_up) if (if_up)
nicvf_stop(dev); nicvf_stop(dev);
nic->qs->rq_cnt = channel->rx_count; cqcount = max(channel->rx_count, channel->tx_count);
nic->qs->sq_cnt = channel->tx_count;
if (cqcount > MAX_CMP_QUEUES_PER_QS) {
nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
} else {
nic->sqs_count = 0;
}
nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt); nic->rx_queues = channel->rx_count;
nic->tx_queues = channel->tx_count;
err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
if (err) if (err)
return err; return err;
...@@ -571,7 +653,7 @@ static int nicvf_set_channels(struct net_device *dev, ...@@ -571,7 +653,7 @@ static int nicvf_set_channels(struct net_device *dev,
nicvf_open(dev); nicvf_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
nic->qs->sq_cnt, nic->qs->rq_cnt); nic->tx_queues, nic->rx_queues);
return err; return err;
} }
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/log2.h> #include <linux/log2.h>
...@@ -50,6 +51,14 @@ module_param(cpi_alg, int, S_IRUGO); ...@@ -50,6 +51,14 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg, MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
else
return qidx;
}
static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -105,7 +114,6 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) ...@@ -105,7 +114,6 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
} }
/* VF -> PF mailbox communication */ /* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx) static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{ {
u64 *msg = (u64 *)mbx; u64 *msg = (u64 *)mbx;
...@@ -147,26 +155,15 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) ...@@ -147,26 +155,15 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
*/ */
static int nicvf_check_pf_ready(struct nicvf *nic) static int nicvf_check_pf_ready(struct nicvf *nic)
{ {
int timeout = 5000, sleep = 20;
union nic_mbx mbx = {}; union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_READY; mbx.msg.msg = NIC_MBOX_MSG_READY;
if (nicvf_send_msg_to_pf(nic, &mbx)) {
nic->pf_ready_to_rcv_msg = false; netdev_err(nic->netdev,
"PF didn't respond to READY msg\n");
nicvf_write_to_mbx(nic, &mbx); return 0;
while (!nic->pf_ready_to_rcv_msg) {
msleep(sleep);
if (nic->pf_ready_to_rcv_msg)
break;
timeout -= sleep;
if (!timeout) {
netdev_err(nic->netdev,
"PF didn't respond to READY msg\n");
return 0;
}
} }
return 1; return 1;
} }
...@@ -197,13 +194,15 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) ...@@ -197,13 +194,15 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg); netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
switch (mbx.msg.msg) { switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY: case NIC_MBOX_MSG_READY:
nic->pf_ready_to_rcv_msg = true; nic->pf_acked = true;
nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id; nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending) if (!nic->set_mac_pending)
ether_addr_copy(nic->netdev->dev_addr, ether_addr_copy(nic->netdev->dev_addr,
mbx.nic_cfg.mac_addr); mbx.nic_cfg.mac_addr);
nic->sqs_mode = mbx.nic_cfg.sqs_mode;
nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false; nic->link_up = false;
nic->duplex = 0; nic->duplex = 0;
nic->speed = 0; nic->speed = 0;
...@@ -221,7 +220,6 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) ...@@ -221,7 +220,6 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_BGX_STATS: case NIC_MBOX_MSG_BGX_STATS:
nicvf_read_bgx_stats(nic, &mbx.bgx_stats); nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
nic->pf_acked = true; nic->pf_acked = true;
nic->bgx_stats_acked = true;
break; break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE: case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true; nic->pf_acked = true;
...@@ -242,6 +240,26 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) ...@@ -242,6 +240,26 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
netif_tx_stop_all_queues(nic->netdev); netif_tx_stop_all_queues(nic->netdev);
} }
break; break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic->sqs_count = mbx.sqs_alloc.qs_count;
nic->pf_acked = true;
break;
case NIC_MBOX_MSG_SNICVF_PTR:
/* Primary VF: make note of secondary VF's pointer
* to be used while packet transmission.
*/
nic->snicvf[mbx.nicvf.sqs_id] =
(struct nicvf *)mbx.nicvf.nicvf;
nic->pf_acked = true;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
/* Secondary VF/Qset: make note of primary VF's pointer
* to be used while packet reception, to handover packet
* to primary VF's netdev.
*/
nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
nic->pf_acked = true;
break;
default: default:
netdev_err(nic->netdev, netdev_err(nic->netdev,
"Invalid message from PF, msg 0x%x\n", mbx.msg.msg); "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
...@@ -326,7 +344,7 @@ static int nicvf_rss_init(struct nicvf *nic) ...@@ -326,7 +344,7 @@ static int nicvf_rss_init(struct nicvf *nic)
nicvf_get_rss_size(nic); nicvf_get_rss_size(nic);
if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) { if (cpi_alg != CPI_ALG_NONE) {
rss->enable = false; rss->enable = false;
rss->hash_bits = 0; rss->hash_bits = 0;
return 0; return 0;
...@@ -350,11 +368,100 @@ static int nicvf_rss_init(struct nicvf *nic) ...@@ -350,11 +368,100 @@ static int nicvf_rss_init(struct nicvf *nic)
for (idx = 0; idx < rss->rss_size; idx++) for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
nic->qs->rq_cnt); nic->rx_queues);
nicvf_config_rss(nic); nicvf_config_rss(nic);
return 1; return 1;
} }
/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
union nic_mbx mbx = {};
int sqs;
int sqs_count = nic->sqs_count;
int rx_queues = 0, tx_queues = 0;
/* Only primary VF should request */
if (nic->sqs_mode || !nic->sqs_count)
return;
mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
mbx.sqs_alloc.vf_id = nic->vf_id;
mbx.sqs_alloc.qs_count = nic->sqs_count;
if (nicvf_send_msg_to_pf(nic, &mbx)) {
/* No response from PF */
nic->sqs_count = 0;
return;
}
/* Return if no Secondary Qsets available */
if (!nic->sqs_count)
return;
if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
/* Set no of Rx/Tx queues in each of the SQsets */
for (sqs = 0; sqs < nic->sqs_count; sqs++) {
mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
mbx.nicvf.vf_id = nic->vf_id;
mbx.nicvf.sqs_id = sqs;
nicvf_send_msg_to_pf(nic, &mbx);
nic->snicvf[sqs]->sqs_id = sqs;
if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
rx_queues -= MAX_RCV_QUEUES_PER_QS;
} else {
nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
rx_queues = 0;
}
if (tx_queues > MAX_SND_QUEUES_PER_QS) {
nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
tx_queues -= MAX_SND_QUEUES_PER_QS;
} else {
nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
tx_queues = 0;
}
nic->snicvf[sqs]->qs->cq_cnt =
max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
/* Initialize secondary Qset's queues and its interrupts */
nicvf_open(nic->snicvf[sqs]->netdev);
}
/* Update stack with actual Rx/Tx queue count allocated */
if (sqs_count != nic->sqs_count)
nicvf_set_real_num_queues(nic->netdev,
nic->tx_queues, nic->rx_queues);
}
/* Send this Qset's nicvf pointer to PF.
* PF inturn sends primary VF's nicvf struct to secondary Qsets/VFs
* so that packets received by these Qsets can use primary VF's netdev
*/
static void nicvf_send_vf_struct(struct nicvf *nic)
{
union nic_mbx mbx = {};
mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
mbx.nicvf.sqs_mode = nic->sqs_mode;
mbx.nicvf.nicvf = (u64)nic;
nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
union nic_mbx mbx = {};
mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
nicvf_send_msg_to_pf(nic, &mbx);
}
int nicvf_set_real_num_queues(struct net_device *netdev, int nicvf_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues) int tx_queues, int rx_queues)
{ {
...@@ -429,6 +536,34 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, ...@@ -429,6 +536,34 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
} }
} }
static inline void nicvf_set_rxhash(struct net_device *netdev,
struct cqe_rx_t *cqe_rx,
struct sk_buff *skb)
{
u8 hash_type;
u32 hash;
if (!(netdev->features & NETIF_F_RXHASH))
return;
switch (cqe_rx->rss_alg) {
case RSS_ALG_TCP_IP:
case RSS_ALG_UDP_IP:
hash_type = PKT_HASH_TYPE_L4;
hash = cqe_rx->rss_tag;
break;
case RSS_ALG_IP:
hash_type = PKT_HASH_TYPE_L3;
hash = cqe_rx->rss_tag;
break;
default:
hash_type = PKT_HASH_TYPE_NONE;
hash = 0;
}
skb_set_hash(skb, hash, hash_type);
}
static void nicvf_rcv_pkt_handler(struct net_device *netdev, static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi, struct napi_struct *napi,
struct cmp_queue *cq, struct cmp_queue *cq,
...@@ -437,6 +572,15 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, ...@@ -437,6 +572,15 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct sk_buff *skb; struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev); struct nicvf *nic = netdev_priv(netdev);
int err = 0; int err = 0;
int rq_idx;
rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
if (nic->sqs_mode) {
/* Use primary VF's 'nicvf' struct */
nic = nic->pnicvf;
netdev = nic->netdev;
}
/* Check for errors */ /* Check for errors */
err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
...@@ -456,9 +600,17 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, ...@@ -456,9 +600,17 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
skb->data, skb->len, true); skb->data, skb->len, true);
} }
/* If error packet, drop it here */
if (err) {
dev_kfree_skb_any(skb);
return;
}
nicvf_set_rx_frame_cnt(nic, skb); nicvf_set_rx_frame_cnt(nic, skb);
skb_record_rx_queue(skb, cqe_rx->rq_idx); nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
if (netdev->hw_features & NETIF_F_RXCSUM) { if (netdev->hw_features & NETIF_F_RXCSUM) {
/* HW by default verifies TCP/UDP/SCTP checksums */ /* HW by default verifies TCP/UDP/SCTP checksums */
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
...@@ -468,6 +620,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, ...@@ -468,6 +620,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
/* Check for stripped VLAN */
if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
ntohs((__force __be16)cqe_rx->vlan_tci));
if (napi && (netdev->features & NETIF_F_GRO)) if (napi && (netdev->features & NETIF_F_GRO))
napi_gro_receive(napi, skb); napi_gro_receive(napi, skb);
else else
...@@ -549,8 +706,11 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, ...@@ -549,8 +706,11 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
done: done:
/* Wakeup TXQ if its stopped earlier due to SQ full */ /* Wakeup TXQ if its stopped earlier due to SQ full */
if (tx_done) { if (tx_done) {
txq = netdev_get_tx_queue(netdev, cq_idx); netdev = nic->pnicvf->netdev;
if (netif_tx_queue_stopped(txq)) { txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq); netif_tx_start_queue(txq);
nic->drv_stats.txq_wake++; nic->drv_stats.txq_wake++;
if (netif_msg_tx_err(nic)) if (netif_msg_tx_err(nic))
...@@ -624,11 +784,20 @@ static void nicvf_handle_qs_err(unsigned long data) ...@@ -624,11 +784,20 @@ static void nicvf_handle_qs_err(unsigned long data)
nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
} }
static void nicvf_dump_intr_status(struct nicvf *nic)
{
if (netif_msg_intr(nic))
netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{ {
struct nicvf *nic = (struct nicvf *)nicvf_irq; struct nicvf *nic = (struct nicvf *)nicvf_irq;
u64 intr; u64 intr;
nicvf_dump_intr_status(nic);
intr = nicvf_reg_read(nic, NIC_VF_INT); intr = nicvf_reg_read(nic, NIC_VF_INT);
/* Check for spurious interrupt */ /* Check for spurious interrupt */
if (!(intr & NICVF_INTR_MBOX_MASK)) if (!(intr & NICVF_INTR_MBOX_MASK))
...@@ -639,59 +808,58 @@ static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) ...@@ -639,59 +808,58 @@ static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq) static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
struct nicvf *nic = cq_poll->nicvf;
int qidx = cq_poll->cq_idx;
nicvf_dump_intr_status(nic);
/* Disable interrupts */
nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
/* Schedule NAPI */
napi_schedule(&cq_poll->napi);
/* Clear interrupt */
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
return IRQ_HANDLED;
}
static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{ {
u64 qidx, intr, clear_intr = 0;
u64 cq_intr, rbdr_intr, qs_err_intr;
struct nicvf *nic = (struct nicvf *)nicvf_irq; struct nicvf *nic = (struct nicvf *)nicvf_irq;
struct queue_set *qs = nic->qs; u8 qidx;
struct nicvf_cq_poll *cq_poll = NULL;
intr = nicvf_reg_read(nic, NIC_VF_INT);
if (netif_msg_intr(nic))
netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
nic->netdev->name, intr);
qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
if (qs_err_intr) {
/* Disable Qset err interrupt and schedule softirq */
nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
tasklet_hi_schedule(&nic->qs_err_task);
clear_intr |= qs_err_intr;
}
/* Disable interrupts and start polling */ nicvf_dump_intr_status(nic);
cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
for (qidx = 0; qidx < qs->cq_cnt; qidx++) { /* Disable RBDR interrupt and schedule softirq */
if (!(cq_intr & (1 << qidx))) for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
continue; if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
continue; continue;
nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
tasklet_hi_schedule(&nic->rbdr_task);
/* Clear interrupt */
nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
}
nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); return IRQ_HANDLED;
clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT); }
cq_poll = nic->napi[qidx]; static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
/* Schedule NAPI */ {
if (cq_poll) struct nicvf *nic = (struct nicvf *)nicvf_irq;
napi_schedule(&cq_poll->napi);
}
/* Handle RBDR interrupts */ nicvf_dump_intr_status(nic);
rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
if (rbdr_intr) { /* Disable Qset err interrupt and schedule softirq */
/* Disable RBDR interrupt and schedule softirq */ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { tasklet_hi_schedule(&nic->qs_err_task);
if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
continue;
nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
tasklet_hi_schedule(&nic->rbdr_task);
clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
}
}
/* Clear interrupts */
nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -725,7 +893,7 @@ static void nicvf_disable_msix(struct nicvf *nic) ...@@ -725,7 +893,7 @@ static void nicvf_disable_msix(struct nicvf *nic)
static int nicvf_register_interrupts(struct nicvf *nic) static int nicvf_register_interrupts(struct nicvf *nic)
{ {
int irq, free, ret = 0; int irq, ret = 0;
int vector; int vector;
for_each_cq_irq(irq) for_each_cq_irq(irq)
...@@ -740,44 +908,42 @@ static int nicvf_register_interrupts(struct nicvf *nic) ...@@ -740,44 +908,42 @@ static int nicvf_register_interrupts(struct nicvf *nic)
sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
nic->vf_id, irq - NICVF_INTR_ID_RBDR); nic->vf_id, irq - NICVF_INTR_ID_RBDR);
/* Register all interrupts except mailbox */ /* Register CQ interrupts */
for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) { for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
vector = nic->msix_entries[irq].vector; vector = nic->msix_entries[irq].vector;
ret = request_irq(vector, nicvf_intr_handler, ret = request_irq(vector, nicvf_intr_handler,
0, nic->irq_name[irq], nic); 0, nic->irq_name[irq], nic->napi[irq]);
if (ret) if (ret)
break; goto err;
nic->irq_allocated[irq] = true; nic->irq_allocated[irq] = true;
} }
for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) { /* Register RBDR interrupt */
for (irq = NICVF_INTR_ID_RBDR;
irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
vector = nic->msix_entries[irq].vector; vector = nic->msix_entries[irq].vector;
ret = request_irq(vector, nicvf_intr_handler, ret = request_irq(vector, nicvf_rbdr_intr_handler,
0, nic->irq_name[irq], nic); 0, nic->irq_name[irq], nic);
if (ret) if (ret)
break; goto err;
nic->irq_allocated[irq] = true; nic->irq_allocated[irq] = true;
} }
/* Register QS error interrupt */
sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
"NICVF%d Qset error", nic->vf_id); "NICVF%d Qset error", nic->vf_id);
if (!ret) { irq = NICVF_INTR_ID_QS_ERR;
vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector; ret = request_irq(nic->msix_entries[irq].vector,
irq = NICVF_INTR_ID_QS_ERR; nicvf_qs_err_intr_handler,
ret = request_irq(vector, nicvf_intr_handler, 0, nic->irq_name[irq], nic);
0, nic->irq_name[irq], nic); if (!ret)
if (!ret) nic->irq_allocated[irq] = true;
nic->irq_allocated[irq] = true;
}
if (ret) { err:
netdev_err(nic->netdev, "Request irq failed\n"); if (ret)
for (free = 0; free < irq; free++) netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
free_irq(nic->msix_entries[free].vector, nic);
return ret;
}
return 0; return ret;
} }
static void nicvf_unregister_interrupts(struct nicvf *nic) static void nicvf_unregister_interrupts(struct nicvf *nic)
...@@ -786,8 +952,14 @@ static void nicvf_unregister_interrupts(struct nicvf *nic) ...@@ -786,8 +952,14 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
/* Free registered interrupts */ /* Free registered interrupts */
for (irq = 0; irq < nic->num_vec; irq++) { for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq]) if (!nic->irq_allocated[irq])
continue;
if (irq < NICVF_INTR_ID_SQ)
free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
else
free_irq(nic->msix_entries[irq].vector, nic); free_irq(nic->msix_entries[irq].vector, nic);
nic->irq_allocated[irq] = false; nic->irq_allocated[irq] = false;
} }
...@@ -852,13 +1024,26 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -852,13 +1024,26 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
netdev_warn(netdev, netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n", "%s: Transmit ring full, stopping SQ%d\n",
netdev->name, qid); netdev->name, qid);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
struct nicvf_cq_poll *cq_poll;
int qidx;
for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
cq_poll = nic->napi[qidx];
if (!cq_poll)
continue;
nic->napi[qidx] = NULL;
kfree(cq_poll);
}
}
int nicvf_stop(struct net_device *netdev) int nicvf_stop(struct net_device *netdev)
{ {
int irq, qidx; int irq, qidx;
...@@ -871,6 +1056,17 @@ int nicvf_stop(struct net_device *netdev) ...@@ -871,6 +1056,17 @@ int nicvf_stop(struct net_device *netdev)
nicvf_send_msg_to_pf(nic, &mbx); nicvf_send_msg_to_pf(nic, &mbx);
netif_carrier_off(netdev); netif_carrier_off(netdev);
netif_tx_stop_all_queues(nic->netdev);
/* Teardown secondary qsets first */
if (!nic->sqs_mode) {
for (qidx = 0; qidx < nic->sqs_count; qidx++) {
if (!nic->snicvf[qidx])
continue;
nicvf_stop(nic->snicvf[qidx]->netdev);
nic->snicvf[qidx] = NULL;
}
}
/* Disable RBDR & QS error interrupts */ /* Disable RBDR & QS error interrupts */
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
...@@ -893,7 +1089,6 @@ int nicvf_stop(struct net_device *netdev) ...@@ -893,7 +1089,6 @@ int nicvf_stop(struct net_device *netdev)
cq_poll = nic->napi[qidx]; cq_poll = nic->napi[qidx];
if (!cq_poll) if (!cq_poll)
continue; continue;
nic->napi[qidx] = NULL;
napi_synchronize(&cq_poll->napi); napi_synchronize(&cq_poll->napi);
/* CQ intr is enabled while napi_complete, /* CQ intr is enabled while napi_complete,
* so disable it now * so disable it now
...@@ -902,7 +1097,6 @@ int nicvf_stop(struct net_device *netdev) ...@@ -902,7 +1097,6 @@ int nicvf_stop(struct net_device *netdev)
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
napi_disable(&cq_poll->napi); napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi); netif_napi_del(&cq_poll->napi);
kfree(cq_poll);
} }
netif_tx_disable(netdev); netif_tx_disable(netdev);
...@@ -918,6 +1112,12 @@ int nicvf_stop(struct net_device *netdev) ...@@ -918,6 +1112,12 @@ int nicvf_stop(struct net_device *netdev)
nicvf_unregister_interrupts(nic); nicvf_unregister_interrupts(nic);
nicvf_free_cq_poll(nic);
/* Clear multiqset info */
nic->pnicvf = nic;
nic->sqs_count = 0;
return 0; return 0;
} }
...@@ -944,6 +1144,7 @@ int nicvf_open(struct net_device *netdev) ...@@ -944,6 +1144,7 @@ int nicvf_open(struct net_device *netdev)
goto napi_del; goto napi_del;
} }
cq_poll->cq_idx = qidx; cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
netif_napi_add(netdev, &cq_poll->napi, nicvf_poll, netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
NAPI_POLL_WEIGHT); NAPI_POLL_WEIGHT);
napi_enable(&cq_poll->napi); napi_enable(&cq_poll->napi);
...@@ -972,10 +1173,16 @@ int nicvf_open(struct net_device *netdev) ...@@ -972,10 +1173,16 @@ int nicvf_open(struct net_device *netdev)
/* Configure CPI alorithm */ /* Configure CPI alorithm */
nic->cpi_alg = cpi_alg; nic->cpi_alg = cpi_alg;
nicvf_config_cpi(nic); if (!nic->sqs_mode)
nicvf_config_cpi(nic);
nicvf_request_sqs(nic);
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
/* Configure receive side scaling */ /* Configure receive side scaling */
nicvf_rss_init(nic); if (!nic->sqs_mode)
nicvf_rss_init(nic);
err = nicvf_register_interrupts(nic); err = nicvf_register_interrupts(nic);
if (err) if (err)
...@@ -1011,6 +1218,8 @@ int nicvf_open(struct net_device *netdev) ...@@ -1011,6 +1218,8 @@ int nicvf_open(struct net_device *netdev)
cleanup: cleanup:
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic); nicvf_unregister_interrupts(nic);
tasklet_kill(&nic->qs_err_task);
tasklet_kill(&nic->rbdr_task);
napi_del: napi_del:
for (qidx = 0; qidx < qs->cq_cnt; qidx++) { for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
cq_poll = nic->napi[qidx]; cq_poll = nic->napi[qidx];
...@@ -1018,9 +1227,8 @@ int nicvf_open(struct net_device *netdev) ...@@ -1018,9 +1227,8 @@ int nicvf_open(struct net_device *netdev)
continue; continue;
napi_disable(&cq_poll->napi); napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi); netif_napi_del(&cq_poll->napi);
kfree(cq_poll);
nic->napi[qidx] = NULL;
} }
nicvf_free_cq_poll(nic);
return err; return err;
} }
...@@ -1077,7 +1285,6 @@ void nicvf_update_lmac_stats(struct nicvf *nic) ...@@ -1077,7 +1285,6 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
{ {
int stat = 0; int stat = 0;
union nic_mbx mbx = {}; union nic_mbx mbx = {};
int timeout;
if (!netif_running(nic->netdev)) if (!netif_running(nic->netdev))
return; return;
...@@ -1087,14 +1294,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic) ...@@ -1087,14 +1294,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
/* Rx stats */ /* Rx stats */
mbx.bgx_stats.rx = 1; mbx.bgx_stats.rx = 1;
while (stat < BGX_RX_STATS_COUNT) { while (stat < BGX_RX_STATS_COUNT) {
nic->bgx_stats_acked = 0;
mbx.bgx_stats.idx = stat; mbx.bgx_stats.idx = stat;
nicvf_send_msg_to_pf(nic, &mbx); if (nicvf_send_msg_to_pf(nic, &mbx))
timeout = 0; return;
while ((!nic->bgx_stats_acked) && (timeout < 10)) {
msleep(2);
timeout++;
}
stat++; stat++;
} }
...@@ -1103,14 +1305,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic) ...@@ -1103,14 +1305,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
/* Tx stats */ /* Tx stats */
mbx.bgx_stats.rx = 0; mbx.bgx_stats.rx = 0;
while (stat < BGX_TX_STATS_COUNT) { while (stat < BGX_TX_STATS_COUNT) {
nic->bgx_stats_acked = 0;
mbx.bgx_stats.idx = stat; mbx.bgx_stats.idx = stat;
nicvf_send_msg_to_pf(nic, &mbx); if (nicvf_send_msg_to_pf(nic, &mbx))
timeout = 0; return;
while ((!nic->bgx_stats_acked) && (timeout < 10)) {
msleep(2);
timeout++;
}
stat++; stat++;
} }
} }
...@@ -1118,7 +1315,7 @@ void nicvf_update_lmac_stats(struct nicvf *nic) ...@@ -1118,7 +1315,7 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
void nicvf_update_stats(struct nicvf *nic) void nicvf_update_stats(struct nicvf *nic)
{ {
int qidx; int qidx;
struct nicvf_hw_stats *stats = &nic->stats; struct nicvf_hw_stats *stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
struct queue_set *qs = nic->qs; struct queue_set *qs = nic->qs;
...@@ -1127,14 +1324,16 @@ void nicvf_update_stats(struct nicvf *nic) ...@@ -1127,14 +1324,16 @@ void nicvf_update_stats(struct nicvf *nic)
#define GET_TX_STATS(reg) \ #define GET_TX_STATS(reg) \
nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS); stats->rx_bytes = GET_RX_STATS(RX_OCTS);
stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST); stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST); stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST); stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
stats->rx_drop_red = GET_RX_STATS(RX_RED); stats->rx_drop_red = GET_RX_STATS(RX_RED);
stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN); stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
...@@ -1146,9 +1345,6 @@ void nicvf_update_stats(struct nicvf *nic) ...@@ -1146,9 +1345,6 @@ void nicvf_update_stats(struct nicvf *nic)
stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP); stats->tx_drops = GET_TX_STATS(TX_DROP);
drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
stats->rx_bcast_frames_ok +
stats->rx_mcast_frames_ok;
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok + stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok; stats->tx_mcast_frames_ok;
...@@ -1167,14 +1363,15 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, ...@@ -1167,14 +1363,15 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats) struct rtnl_link_stats64 *stats)
{ {
struct nicvf *nic = netdev_priv(netdev); struct nicvf *nic = netdev_priv(netdev);
struct nicvf_hw_stats *hw_stats = &nic->stats; struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic); nicvf_update_stats(nic);
stats->rx_bytes = hw_stats->rx_bytes_ok; stats->rx_bytes = hw_stats->rx_bytes;
stats->rx_packets = drv_stats->rx_frames_ok; stats->rx_packets = drv_stats->rx_frames_ok;
stats->rx_dropped = drv_stats->rx_drops; stats->rx_dropped = drv_stats->rx_drops;
stats->multicast = hw_stats->rx_mcast_frames;
stats->tx_bytes = hw_stats->tx_bytes_ok; stats->tx_bytes = hw_stats->tx_bytes_ok;
stats->tx_packets = drv_stats->tx_frames_ok; stats->tx_packets = drv_stats->tx_frames_ok;
...@@ -1208,6 +1405,45 @@ static void nicvf_reset_task(struct work_struct *work) ...@@ -1208,6 +1405,45 @@ static void nicvf_reset_task(struct work_struct *work)
nic->netdev->trans_start = jiffies; nic->netdev->trans_start = jiffies;
} }
static int nicvf_config_loopback(struct nicvf *nic,
netdev_features_t features)
{
union nic_mbx mbx = {};
mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
mbx.lbk.vf_id = nic->vf_id;
mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
return nicvf_send_msg_to_pf(nic, &mbx);
}
static netdev_features_t nicvf_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct nicvf *nic = netdev_priv(netdev);
if ((features & NETIF_F_LOOPBACK) &&
netif_running(netdev) && !nic->loopback_supported)
features &= ~NETIF_F_LOOPBACK;
return features;
}
static int nicvf_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct nicvf *nic = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
nicvf_config_vlan_stripping(nic, features);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return nicvf_config_loopback(nic, features);
return 0;
}
static const struct net_device_ops nicvf_netdev_ops = { static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open, .ndo_open = nicvf_open,
.ndo_stop = nicvf_stop, .ndo_stop = nicvf_stop,
...@@ -1216,6 +1452,8 @@ static const struct net_device_ops nicvf_netdev_ops = { ...@@ -1216,6 +1452,8 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_set_mac_address = nicvf_set_mac_address, .ndo_set_mac_address = nicvf_set_mac_address,
.ndo_get_stats64 = nicvf_get_stats64, .ndo_get_stats64 = nicvf_get_stats64,
.ndo_tx_timeout = nicvf_tx_timeout, .ndo_tx_timeout = nicvf_tx_timeout,
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
}; };
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
...@@ -1223,8 +1461,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1223,8 +1461,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct net_device *netdev; struct net_device *netdev;
struct nicvf *nic; struct nicvf *nic;
struct queue_set *qs; int err, qcount;
int err;
err = pci_enable_device(pdev); err = pci_enable_device(pdev);
if (err) { if (err) {
...@@ -1250,9 +1487,17 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1250,9 +1487,17 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions; goto err_release_regions;
} }
netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount = MAX_CMP_QUEUES_PER_QS;
MAX_RCV_QUEUES_PER_QS,
MAX_SND_QUEUES_PER_QS); /* Restrict multiqset support only for host bound VFs */
if (pdev->is_virtfn) {
/* Set max number of queues per VF */
qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
qcount = min(qcount,
(MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
}
netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
if (!netdev) { if (!netdev) {
err = -ENOMEM; err = -ENOMEM;
goto err_release_regions; goto err_release_regions;
...@@ -1265,6 +1510,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1265,6 +1510,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic = netdev_priv(netdev); nic = netdev_priv(netdev);
nic->netdev = netdev; nic->netdev = netdev;
nic->pdev = pdev; nic->pdev = pdev;
nic->pnicvf = nic;
nic->max_queues = qcount;
/* MAP VF's configuration registers */ /* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
...@@ -1278,20 +1525,31 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1278,20 +1525,31 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) if (err)
goto err_free_netdev; goto err_free_netdev;
qs = nic->qs;
err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
if (err)
goto err_free_netdev;
/* Check if PF is alive and get MAC address for this VF */ /* Check if PF is alive and get MAC address for this VF */
err = nicvf_register_misc_interrupt(nic); err = nicvf_register_misc_interrupt(nic);
if (err) if (err)
goto err_free_netdev; goto err_free_netdev;
netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | nicvf_send_vf_struct(nic);
NETIF_F_TSO | NETIF_F_GRO);
netdev->hw_features = netdev->features; /* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
if (err)
goto err_unregister_interrupts;
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_RX);
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK;
netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->netdev_ops = &nicvf_netdev_ops; netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT; netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
...@@ -1326,8 +1584,13 @@ static void nicvf_remove(struct pci_dev *pdev) ...@@ -1326,8 +1584,13 @@ static void nicvf_remove(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
struct nicvf *nic = netdev_priv(netdev); struct nicvf *nic = netdev_priv(netdev);
struct net_device *pnetdev = nic->pnicvf->netdev;
unregister_netdev(netdev); /* Check if this Qset is assigned to different VF.
* If yes, clean primary and all secondary Qsets.
*/
if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic); nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
free_netdev(netdev); free_netdev(netdev);
......
...@@ -475,6 +475,27 @@ static void nicvf_reclaim_rbdr(struct nicvf *nic, ...@@ -475,6 +475,27 @@ static void nicvf_reclaim_rbdr(struct nicvf *nic,
return; return;
} }
/* Enable or disable first-VLAN-tag stripping on receive, driven by the
 * NETIF_F_HW_VLAN_CTAG_RX feature bit.  The setting is written to the
 * primary Qset's RQ_GEN_CFG register and then mirrored to every attached
 * secondary Qset so all queues behave identically.
 */
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	/* Bit 25 of NIC_QSET_RQ_GEN_CFG controls first VLAN stripping. */
	const u64 strip_bit = 1ULL << 25;
	u64 cfg;
	int idx;

	cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		cfg |= strip_bit;
	else
		cfg &= ~strip_bit;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, cfg);

	/* Propagate the same config to any secondary Qsets. */
	for (idx = 0; idx < nic->sqs_count; idx++) {
		if (!nic->snicvf[idx])
			continue;
		nicvf_queue_reg_write(nic->snicvf[idx],
				      NIC_QSET_RQ_GEN_CFG, 0, cfg);
	}
}
/* Configures receive queue */ /* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable) int qidx, bool enable)
...@@ -524,7 +545,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, ...@@ -524,7 +545,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx); nicvf_send_msg_to_pf(nic, &mbx);
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
if (!nic->sqs_mode)
nicvf_config_vlan_stripping(nic, nic->netdev->features);
/* Enable Receive queue */ /* Enable Receive queue */
rq_cfg.ena = 1; rq_cfg.ena = 1;
...@@ -598,6 +621,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, ...@@ -598,6 +621,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
mbx.sq.qs_num = qs->vnic_id; mbx.sq.qs_num = qs->vnic_id;
mbx.sq.sq_num = qidx; mbx.sq.sq_num = qidx;
mbx.sq.sqs_mode = nic->sqs_mode;
mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
nicvf_send_msg_to_pf(nic, &mbx); nicvf_send_msg_to_pf(nic, &mbx);
...@@ -679,6 +703,7 @@ void nicvf_qset_config(struct nicvf *nic, bool enable) ...@@ -679,6 +703,7 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
/* Send a mailbox msg to PF to config Qset */ /* Send a mailbox msg to PF to config Qset */
mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
mbx.qs.num = qs->vnic_id; mbx.qs.num = qs->vnic_id;
mbx.qs.sqs_count = nic->sqs_count;
mbx.qs.cfg = 0; mbx.qs.cfg = 0;
qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
...@@ -759,6 +784,10 @@ int nicvf_set_qset_resources(struct nicvf *nic) ...@@ -759,6 +784,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
qs->rbdr_len = RCV_BUF_COUNT; qs->rbdr_len = RCV_BUF_COUNT;
qs->sq_len = SND_QUEUE_LEN; qs->sq_len = SND_QUEUE_LEN;
qs->cq_len = CMP_QUEUE_LEN; qs->cq_len = CMP_QUEUE_LEN;
nic->rx_queues = qs->rq_cnt;
nic->tx_queues = qs->sq_cnt;
return 0; return 0;
} }
...@@ -961,9 +990,6 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, ...@@ -961,9 +990,6 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
/* Offload checksum calculation to HW */ /* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->protocol != htons(ETH_P_IP))
return;
hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb); hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb); hdr->l4_offset = skb_transport_offset(skb);
...@@ -1005,7 +1031,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, ...@@ -1005,7 +1031,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
* them to SQ for transfer * them to SQ for transfer
*/ */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int qentry, struct sk_buff *skb) int sq_num, int qentry, struct sk_buff *skb)
{ {
struct tso_t tso; struct tso_t tso;
int seg_subdescs = 0, desc_cnt = 0; int seg_subdescs = 0, desc_cnt = 0;
...@@ -1065,7 +1091,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, ...@@ -1065,7 +1091,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Inform HW to xmit all TSO segments */ /* Inform HW to xmit all TSO segments */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
skb_get_queue_mapping(skb), desc_cnt); sq_num, desc_cnt);
nic->drv_stats.tx_tso++; nic->drv_stats.tx_tso++;
return 1; return 1;
} }
...@@ -1076,10 +1102,24 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) ...@@ -1076,10 +1102,24 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
int i, size; int i, size;
int subdesc_cnt; int subdesc_cnt;
int sq_num, qentry; int sq_num, qentry;
struct queue_set *qs = nic->qs; struct queue_set *qs;
struct snd_queue *sq; struct snd_queue *sq;
sq_num = skb_get_queue_mapping(skb); sq_num = skb_get_queue_mapping(skb);
if (sq_num >= MAX_SND_QUEUES_PER_QS) {
/* Get secondary Qset's SQ structure */
i = sq_num / MAX_SND_QUEUES_PER_QS;
if (!nic->snicvf[i - 1]) {
netdev_warn(nic->netdev,
"Secondary Qset#%d's ptr not initialized\n",
i - 1);
return 1;
}
nic = (struct nicvf *)nic->snicvf[i - 1];
sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
}
qs = nic->qs;
sq = &qs->sq[sq_num]; sq = &qs->sq[sq_num];
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
...@@ -1090,7 +1130,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) ...@@ -1090,7 +1130,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Check if its a TSO packet */ /* Check if its a TSO packet */
if (skb_shinfo(skb)->gso_size) if (skb_shinfo(skb)->gso_size)
return nicvf_sq_append_tso(nic, sq, qentry, skb); return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
/* Add SQ header subdesc */ /* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
...@@ -1126,6 +1166,8 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) ...@@ -1126,6 +1166,8 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
return 1; return 1;
append_fail: append_fail:
/* Use original PCI dev for debug log */
nic = nic->pnicvf;
netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
return 0; return 0;
} }
...@@ -1371,10 +1413,11 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) ...@@ -1371,10 +1413,11 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
int nicvf_check_cqe_rx_errs(struct nicvf *nic, int nicvf_check_cqe_rx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{ {
struct cmp_queue_stats *stats = &cq->stats; struct nicvf_hw_stats *stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
if (!cqe_rx->err_level && !cqe_rx->err_opcode) { if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
stats->rx.errop.good++; drv_stats->rx_frames_ok++;
return 0; return 0;
} }
...@@ -1384,111 +1427,78 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, ...@@ -1384,111 +1427,78 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic,
nic->netdev->name, nic->netdev->name,
cqe_rx->err_level, cqe_rx->err_opcode); cqe_rx->err_level, cqe_rx->err_opcode);
switch (cqe_rx->err_level) {
case CQ_ERRLVL_MAC:
stats->rx.errlvl.mac_errs++;
break;
case CQ_ERRLVL_L2:
stats->rx.errlvl.l2_errs++;
break;
case CQ_ERRLVL_L3:
stats->rx.errlvl.l3_errs++;
break;
case CQ_ERRLVL_L4:
stats->rx.errlvl.l4_errs++;
break;
}
switch (cqe_rx->err_opcode) { switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL: case CQ_RX_ERROP_RE_PARTIAL:
stats->rx.errop.partial_pkts++; stats->rx_bgx_truncated_pkts++;
break; break;
case CQ_RX_ERROP_RE_JABBER: case CQ_RX_ERROP_RE_JABBER:
stats->rx.errop.jabber_errs++; stats->rx_jabber_errs++;
break; break;
case CQ_RX_ERROP_RE_FCS: case CQ_RX_ERROP_RE_FCS:
stats->rx.errop.fcs_errs++; stats->rx_fcs_errs++;
break;
case CQ_RX_ERROP_RE_TERMINATE:
stats->rx.errop.terminate_errs++;
break; break;
case CQ_RX_ERROP_RE_RX_CTL: case CQ_RX_ERROP_RE_RX_CTL:
stats->rx.errop.bgx_rx_errs++; stats->rx_bgx_errs++;
break; break;
case CQ_RX_ERROP_PREL2_ERR: case CQ_RX_ERROP_PREL2_ERR:
stats->rx.errop.prel2_errs++; stats->rx_prel2_errs++;
break;
case CQ_RX_ERROP_L2_FRAGMENT:
stats->rx.errop.l2_frags++;
break;
case CQ_RX_ERROP_L2_OVERRUN:
stats->rx.errop.l2_overruns++;
break;
case CQ_RX_ERROP_L2_PFCS:
stats->rx.errop.l2_pfcs++;
break;
case CQ_RX_ERROP_L2_PUNY:
stats->rx.errop.l2_puny++;
break; break;
case CQ_RX_ERROP_L2_MAL: case CQ_RX_ERROP_L2_MAL:
stats->rx.errop.l2_hdr_malformed++; stats->rx_l2_hdr_malformed++;
break; break;
case CQ_RX_ERROP_L2_OVERSIZE: case CQ_RX_ERROP_L2_OVERSIZE:
stats->rx.errop.l2_oversize++; stats->rx_oversize++;
break; break;
case CQ_RX_ERROP_L2_UNDERSIZE: case CQ_RX_ERROP_L2_UNDERSIZE:
stats->rx.errop.l2_undersize++; stats->rx_undersize++;
break; break;
case CQ_RX_ERROP_L2_LENMISM: case CQ_RX_ERROP_L2_LENMISM:
stats->rx.errop.l2_len_mismatch++; stats->rx_l2_len_mismatch++;
break; break;
case CQ_RX_ERROP_L2_PCLP: case CQ_RX_ERROP_L2_PCLP:
stats->rx.errop.l2_pclp++; stats->rx_l2_pclp++;
break; break;
case CQ_RX_ERROP_IP_NOT: case CQ_RX_ERROP_IP_NOT:
stats->rx.errop.non_ip++; stats->rx_ip_ver_errs++;
break; break;
case CQ_RX_ERROP_IP_CSUM_ERR: case CQ_RX_ERROP_IP_CSUM_ERR:
stats->rx.errop.ip_csum_err++; stats->rx_ip_csum_errs++;
break; break;
case CQ_RX_ERROP_IP_MAL: case CQ_RX_ERROP_IP_MAL:
stats->rx.errop.ip_hdr_malformed++; stats->rx_ip_hdr_malformed++;
break; break;
case CQ_RX_ERROP_IP_MALD: case CQ_RX_ERROP_IP_MALD:
stats->rx.errop.ip_payload_malformed++; stats->rx_ip_payload_malformed++;
break; break;
case CQ_RX_ERROP_IP_HOP: case CQ_RX_ERROP_IP_HOP:
stats->rx.errop.ip_hop_errs++; stats->rx_ip_ttl_errs++;
break;
case CQ_RX_ERROP_L3_ICRC:
stats->rx.errop.l3_icrc_errs++;
break; break;
case CQ_RX_ERROP_L3_PCLP: case CQ_RX_ERROP_L3_PCLP:
stats->rx.errop.l3_pclp++; stats->rx_l3_pclp++;
break; break;
case CQ_RX_ERROP_L4_MAL: case CQ_RX_ERROP_L4_MAL:
stats->rx.errop.l4_malformed++; stats->rx_l4_malformed++;
break; break;
case CQ_RX_ERROP_L4_CHK: case CQ_RX_ERROP_L4_CHK:
stats->rx.errop.l4_csum_errs++; stats->rx_l4_csum_errs++;
break; break;
case CQ_RX_ERROP_UDP_LEN: case CQ_RX_ERROP_UDP_LEN:
stats->rx.errop.udp_len_err++; stats->rx_udp_len_errs++;
break; break;
case CQ_RX_ERROP_L4_PORT: case CQ_RX_ERROP_L4_PORT:
stats->rx.errop.bad_l4_port++; stats->rx_l4_port_errs++;
break; break;
case CQ_RX_ERROP_TCP_FLAG: case CQ_RX_ERROP_TCP_FLAG:
stats->rx.errop.bad_tcp_flag++; stats->rx_tcp_flag_errs++;
break; break;
case CQ_RX_ERROP_TCP_OFFSET: case CQ_RX_ERROP_TCP_OFFSET:
stats->rx.errop.tcp_offset_errs++; stats->rx_tcp_offset_errs++;
break; break;
case CQ_RX_ERROP_L4_PCLP: case CQ_RX_ERROP_L4_PCLP:
stats->rx.errop.l4_pclp++; stats->rx_l4_pclp++;
break; break;
case CQ_RX_ERROP_RBDR_TRUNC: case CQ_RX_ERROP_RBDR_TRUNC:
stats->rx.errop.pkt_truncated++; stats->rx_truncated_pkts++;
break; break;
} }
......
...@@ -181,47 +181,6 @@ enum CQ_TX_ERROP_E { ...@@ -181,47 +181,6 @@ enum CQ_TX_ERROP_E {
}; };
struct cmp_queue_stats { struct cmp_queue_stats {
struct rx_stats {
struct {
u64 mac_errs;
u64 l2_errs;
u64 l3_errs;
u64 l4_errs;
} errlvl;
struct {
u64 good;
u64 partial_pkts;
u64 jabber_errs;
u64 fcs_errs;
u64 terminate_errs;
u64 bgx_rx_errs;
u64 prel2_errs;
u64 l2_frags;
u64 l2_overruns;
u64 l2_pfcs;
u64 l2_puny;
u64 l2_hdr_malformed;
u64 l2_oversize;
u64 l2_undersize;
u64 l2_len_mismatch;
u64 l2_pclp;
u64 non_ip;
u64 ip_csum_err;
u64 ip_hdr_malformed;
u64 ip_payload_malformed;
u64 ip_hop_errs;
u64 l3_icrc_errs;
u64 l3_pclp;
u64 l4_malformed;
u64 l4_csum_errs;
u64 udp_len_err;
u64 bad_l4_port;
u64 bad_tcp_flag;
u64 tcp_offset_errs;
u64 l4_pclp;
u64 pkt_truncated;
} errop;
} rx;
struct tx_stats { struct tx_stats {
u64 good; u64 good;
u64 desc_fault; u64 desc_fault;
...@@ -292,6 +251,7 @@ struct cmp_queue { ...@@ -292,6 +251,7 @@ struct cmp_queue {
void *desc; void *desc;
struct q_desc_mem dmem; struct q_desc_mem dmem;
struct cmp_queue_stats stats; struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct snd_queue { struct snd_queue {
...@@ -347,6 +307,8 @@ struct queue_set { ...@@ -347,6 +307,8 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) #define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic); int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable); int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable); void nicvf_qset_config(struct nicvf *nic, bool enable);
......
...@@ -329,6 +329,37 @@ static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) ...@@ -329,6 +329,37 @@ static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
} }
} }
/* Configure internal loopback on one BGX LMAC.
 *
 * SGMII LMACs use the PCS loopback bit in BGX_GMP_PCS_MRX_CTL; all other
 * interface types use the SPU loopback bit in BGX_SPUX_CONTROL1.  The
 * function is a no-op when the addressed BGX block does not exist.
 */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 reg, mask, cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	/* Pick the register/bit pair for this LMAC's interface type. */
	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		reg = BGX_GMP_PCS_MRX_CTL;
		mask = PCS_MRX_CTL_LOOPBACK1;
	} else {
		reg = BGX_SPUX_CONTROL1;
		mask = SPU_CTL_LOOPBACK;
	}

	cfg = bgx_reg_read(bgx, lmac_idx, reg);
	if (enable)
		cfg |= mask;
	else
		cfg &= ~mask;
	bgx_reg_write(bgx, lmac_idx, reg, cfg);
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{ {
u64 cfg; u64 cfg;
......
...@@ -72,6 +72,7 @@ ...@@ -72,6 +72,7 @@
#define BGX_SPUX_CONTROL1 0x10000 #define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER BIT_ULL(11) #define SPU_CTL_LOW_POWER BIT_ULL(11)
#define SPU_CTL_LOOPBACK BIT_ULL(14)
#define SPU_CTL_RESET BIT_ULL(15) #define SPU_CTL_RESET BIT_ULL(15)
#define BGX_SPUX_STATUS1 0x10008 #define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK BIT_ULL(2) #define SPU_STATUS1_RCV_LNK BIT_ULL(2)
...@@ -126,6 +127,7 @@ ...@@ -126,6 +127,7 @@
#define PCS_MRX_CTL_RST_AN BIT_ULL(9) #define PCS_MRX_CTL_RST_AN BIT_ULL(9)
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11) #define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
#define PCS_MRX_CTL_AN_EN BIT_ULL(12) #define PCS_MRX_CTL_AN_EN BIT_ULL(12)
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15) #define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008 #define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
...@@ -186,6 +188,8 @@ int bgx_get_lmac_count(int node, int bgx); ...@@ -186,6 +188,8 @@ int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11 #define BGX_RX_STATS_COUNT 11
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册