Commit acae4b48 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Updates.

Various changes including an updated firmware interface, an improved TX ring
allocation scheme, improved out-of-memory handling in the NAPI loop, reduced
default rings on multi-port devices, and new PCI IDs. Of particular note:

CPU affinity hints from Vasundhara Volam.

TC Flower eswitch support from Sathya Perla.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -212,6 +212,15 @@ config BNXT_SRIOV
Virtualization support in the NetXtreme-C/E products. This
allows for virtual function acceleration in virtual environments.
config BNXT_FLOWER_OFFLOAD
bool "TC Flower offload support for NetXtreme-C/E"
depends on BNXT
default y
---help---
This configuration parameter enables TC Flower packet classifier
offload for eswitch. This option enables SR-IOV switchdev eswitch
offload.
config BNXT_DCB
bool "Data Center Bridging (DCB) Support"
default n
......
obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o
......@@ -49,6 +49,8 @@
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
......@@ -58,6 +60,7 @@
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
......@@ -103,6 +106,8 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
BCM58802,
BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
......@@ -111,39 +116,42 @@ enum board_idx {
static const struct {
char *name;
} board_info[] = {
{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
......@@ -174,8 +182,9 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
......@@ -1843,6 +1852,13 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
&event);
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
* the NAPI budget. Otherwise, we may potentially loop
* here forever if we consistently cannot allocate
* buffers.
*/
else if (rc == -ENOMEM)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
......@@ -4461,9 +4477,33 @@ static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
mutex_lock(&bp->hwrm_cmd_lock);
rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
mutex_unlock(&bp->hwrm_cmd_lock);
if (!rc)
bp->tx_reserved_rings = *tx_rings;
return rc;
}
static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
{
struct hwrm_func_cfg_input req = {0};
int rc;
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_VF(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
req.num_tx_rings = cpu_to_le16(tx_rings);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
return 0;
}
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
......@@ -5115,6 +5155,15 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
if (bp->tx_reserved_rings != bp->tx_nr_rings) {
int tx = bp->tx_nr_rings;
if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
tx < bp->tx_nr_rings) {
rc = -ENOMEM;
goto err_out;
}
}
}
rc = bnxt_hwrm_ring_alloc(bp);
......@@ -5521,8 +5570,15 @@ static void bnxt_free_irq(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
irq = &bp->irq_tbl[i];
if (irq->requested)
if (irq->requested) {
if (irq->have_cpumask) {
irq_set_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
}
......@@ -5555,6 +5611,21 @@ static int bnxt_request_irq(struct bnxt *bp)
break;
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
irq->have_cpumask = 1;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
"Set affinity failed, IRQ = %d\n",
irq->vector);
break;
}
}
}
return rc;
}
......@@ -5726,6 +5797,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
bp->port_count = resp->port_cnt;
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
......@@ -6998,8 +7071,8 @@ static void bnxt_sp_task(struct work_struct *work)
}
/* Under rtnl_lock */
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
......@@ -7019,10 +7092,7 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;
if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
tx_rings_needed < (tx * tx_sets + tx_xdp))
return -ENOMEM;
return 0;
return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
......@@ -7211,8 +7281,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
sh, tc, bp->tx_nr_rings_xdp);
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
sh, tc, bp->tx_nr_rings_xdp);
if (rc)
return rc;
......@@ -7237,17 +7307,33 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return 0;
}
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
static int bnxt_setup_flower(struct net_device *dev,
struct tc_cls_flower_offload *cls_flower)
{
struct tc_mqprio_qopt *mqprio = type_data;
struct bnxt *bp = netdev_priv(dev);
if (type != TC_SETUP_MQPRIO)
if (BNXT_VF(bp))
return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower);
}
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_CLSFLOWER:
return bnxt_setup_flower(dev, type_data);
case TC_SETUP_MQPRIO: {
struct tc_mqprio_qopt *mqprio = type_data;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
return bnxt_setup_mq_tc(dev, mqprio->num_tc);
return bnxt_setup_mq_tc(dev, mqprio->num_tc);
}
default:
return -EOPNOTSUPP;
}
}
#ifdef CONFIG_RFS_ACCEL
......@@ -7643,6 +7729,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_shutdown_tc(bp);
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
......@@ -7811,6 +7898,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
/* Reduce default rings to reduce memory usage on multi-port cards */
if (bp->port_count > 1)
dflt_rings = min_t(int, dflt_rings, 4);
rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
......@@ -7983,6 +8073,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);
rc = bnxt_probe_phy(bp);
if (rc)
goto init_err_pci_clean;
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
......@@ -8017,10 +8111,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
rc = bnxt_probe_phy(bp);
if (rc)
goto init_err_pci_clean;
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err_pci_clean;
......@@ -8031,9 +8121,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
if (BNXT_PF(bp))
bnxt_init_tc(bp);
rc = register_netdev(dev);
if (rc)
goto init_err_clr_int;
goto init_err_cleanup_tc;
if (BNXT_PF(bp))
bnxt_dl_register(bp);
......@@ -8046,7 +8139,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
init_err_clr_int:
init_err_cleanup_tc:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
init_err_pci_clean:
......
......@@ -19,6 +19,7 @@
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
......@@ -701,8 +702,10 @@ struct bnxt_napi {
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested;
u8 requested:1;
u8 have_cpumask:1;
char name[IFNAMSIZ + 2];
cpumask_var_t cpu_mask;
};
#define HWRM_RING_ALLOC_TX 0x1
......@@ -941,6 +944,27 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
struct bnxt_tc_info {
bool enabled;
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
/* hash table to store L2 keys of TC flows */
struct rhashtable l2_table;
struct rhashtable_params l2_ht_params;
/* lock to atomically add/del an l2 node when a flow is
* added or deleted.
*/
struct mutex lock;
/* Stat counter mask (width) */
u64 bytes_mask;
u64 packets_mask;
};
struct bnxt_vf_rep_stats {
u64 packets;
u64 bytes;
......@@ -988,6 +1012,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58808 0xd808
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
......@@ -1019,6 +1046,10 @@ struct bnxt {
#define BNXT_CHIP_NUM_57X1X(chip_num) \
(BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
#define BNXT_CHIP_NUM_588XX(chip_num) \
((chip_num) == CHIP_NUM_58802 || \
(chip_num) == CHIP_NUM_58808)
struct net_device *dev;
struct pci_dev *pdev;
......@@ -1077,6 +1108,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
......@@ -1118,6 +1150,7 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
......@@ -1196,6 +1229,7 @@ struct bnxt {
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
u16 rx_coal_ticks;
......@@ -1277,6 +1311,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
struct bnxt_tc_info tc_info;
};
#define BNXT_RX_STATS_OFFSET(counter) \
......@@ -1346,8 +1381,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
......
......@@ -435,8 +435,7 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs,
tx_xdp);
rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
if (rc) {
netdev_warn(dev, "Unable to allocate the requested rings\n");
return rc;
......
......@@ -11,14 +11,14 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
/* HSI and HWRM Specification 1.8.0 */
/* HSI and HWRM Specification 1.8.1 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 8
#define HWRM_VERSION_UPDATE 0
#define HWRM_VERSION_UPDATE 1
#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */
#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */
#define HWRM_VERSION_STR "1.8.0.0"
#define HWRM_VERSION_STR "1.8.1.4"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
......@@ -946,6 +946,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
#define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
......@@ -1972,7 +1973,12 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
u8 unused_0;
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
......@@ -4407,6 +4413,164 @@ struct hwrm_cfa_ntuple_filter_cfg_output {
u8 valid;
};
/* hwrm_cfa_flow_alloc */
/* Input (128 bytes) */
struct hwrm_cfa_flow_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 flags;
#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
__le16 act_meter_id;
__le16 ref_flow_handle;
__be16 ethertype;
__be16 outer_vlan_tci;
__be16 dmac[3];
__be16 inner_vlan_tci;
__be16 smac[3];
u8 ip_dst_mask_len;
u8 ip_src_mask_len;
__be32 ip_dst[4];
__be32 ip_src[4];
__be16 l4_src_port;
__be16 l4_src_port_mask;
__be16 l4_dst_port;
__be16 l4_dst_port_mask;
__be32 nat_ip_address[4];
__be16 l2_rewrite_dmac[3];
__be16 nat_port;
__be16 l2_rewrite_smac[3];
u8 ip_proto;
u8 unused_0;
};
/* Output (16 bytes) */
struct hwrm_cfa_flow_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 flow_handle;
u8 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 unused_4;
u8 valid;
};
/* hwrm_cfa_flow_free */
/* Input (24 bytes) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
__le16 unused_0[3];
};
/* Output (32 bytes) */
struct hwrm_cfa_flow_free_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le64 packet;
__le64 byte;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_cfa_flow_stats */
/* Input (40 bytes) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 num_flows;
__le16 flow_handle_0;
__le16 flow_handle_1;
__le16 flow_handle_2;
__le16 flow_handle_3;
__le16 flow_handle_4;
__le16 flow_handle_5;
__le16 flow_handle_6;
__le16 flow_handle_7;
__le16 flow_handle_8;
__le16 flow_handle_9;
__le16 unused_0;
};
/* Output (176 bytes) */
struct hwrm_cfa_flow_stats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le64 packet_0;
__le64 packet_1;
__le64 packet_2;
__le64 packet_3;
__le64 packet_4;
__le64 packet_5;
__le64 packet_6;
__le64 packet_7;
__le64 packet_8;
__le64 packet_9;
__le64 byte_0;
__le64 byte_1;
__le64 byte_2;
__le64 byte_3;
__le64 byte_4;
__le64 byte_5;
__le64 byte_6;
__le64 byte_7;
__le64 byte_8;
__le64 byte_9;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_cfa_vfr_alloc */
/* Input (32 bytes) */
struct hwrm_cfa_vfr_alloc_input {
......@@ -5534,11 +5698,15 @@ struct hwrm_selftest_qlist_output {
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 offline_tests;
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 unused_0;
__le16 test_timeout;
u8 unused_1;
......@@ -5566,6 +5734,8 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL
u8 unused_0[7];
};
......@@ -5580,11 +5750,15 @@ struct hwrm_selftest_exec_output {
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 test_success;
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL
__le16 unused_0[3];
};
......@@ -5767,6 +5941,7 @@ struct cmd_nums {
#define HWRM_SELFTEST_QLIST (0x200UL)
#define HWRM_SELFTEST_EXEC (0x201UL)
#define HWRM_SELFTEST_IRQ (0x202UL)
#define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
......@@ -5984,6 +6159,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
__le16 len;
u8 version;
u8 count;
......
(This diff has been collapsed.)
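The collapsed diff above is the bulk of the new TC Flower code (the bnxt_tc.c implementation is not shown on this page). For orientation, a minimal sketch of the entry point it provides, bnxt_tc_setup_flower(), which the ndo_setup_tc hooks in bnxt.c and bnxt_vfr.c dispatch to; the handler names used here (bnxt_tc_add_flow() and friends) are illustrative assumptions, not taken from this diff:

/* Sketch only: dispatch a TC flower command for a flow keyed on src_fid.
 * The real bnxt_tc.c is collapsed above; handler names are illustrative.
 */
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	if (!bp->tc_info.enabled)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		/* parse the flower match/actions and program the firmware */
		return bnxt_tc_add_flow(bp, src_fid, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		/* release the firmware flow handle for this cookie */
		return bnxt_tc_del_flow(bp, cls_flower);
	case TC_CLSFLOWER_STATS:
		/* report hardware byte/packet counters back to TC */
		return bnxt_tc_get_flow_stats(bp, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}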
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_TC_H
#define BNXT_TC_H
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
/* Structs used for storing the filter/actions of the TC cmd.
*/
struct bnxt_tc_l2_key {
u8 dmac[ETH_ALEN];
u8 smac[ETH_ALEN];
__be16 inner_vlan_tpid;
__be16 inner_vlan_tci;
__be16 ether_type;
u8 num_vlans;
};
struct bnxt_tc_l3_key {
union {
struct {
struct in_addr daddr;
struct in_addr saddr;
} ipv4;
struct {
struct in6_addr daddr;
struct in6_addr saddr;
} ipv6;
};
};
struct bnxt_tc_l4_key {
u8 ip_proto;
union {
struct {
__be16 sport;
__be16 dport;
} ports;
struct {
u8 type;
u8 code;
} icmp;
};
};
struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1)
#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3)
#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4)
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
u16 dst_fid;
struct net_device *dst_dev;
__be16 push_vlan_tpid;
__be16 push_vlan_tci;
};
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
};
struct bnxt_tc_flow {
u32 flags;
#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1)
#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2)
#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3)
#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4)
#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5)
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
struct bnxt_tc_l3_key l3_mask;
struct bnxt_tc_l4_key l4_key;
struct bnxt_tc_l4_key l4_mask;
struct bnxt_tc_actions actions;
/* updated stats accounting for hw-counter wrap-around */
struct bnxt_tc_flow_stats stats;
/* previous snap-shot of stats */
struct bnxt_tc_flow_stats prev_stats;
unsigned long lastused; /* jiffies */
};
/* L2 hash table
* This data-struct is used for L2-flow table.
* The L2 part of a flow is stored in a hash table.
* A flow that shares the same L2 key/mask with an
already existing flow must refer to its flow handle.
*/
struct bnxt_tc_l2_node {
/* hash key: first 16b of key */
#define BNXT_TC_L2_KEY_LEN 16
struct bnxt_tc_l2_key key;
struct rhash_head node;
/* a linked list of flows that share the same l2 key */
struct list_head common_l2_flows;
/* number of flows sharing the l2 key */
u16 refcount;
struct rcu_head rcu;
};
struct bnxt_tc_flow_node {
/* hash key: provided by TC */
unsigned long cookie;
struct rhash_head node;
struct bnxt_tc_flow flow;
__le16 flow_handle;
/* L2 node in l2 hashtable that shares flow's l2 key */
struct bnxt_tc_l2_node *l2_node;
/* for the shared_flows list maintained in l2_node */
struct list_head l2_list_node;
struct rcu_head rcu;
};
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower);
int bnxt_init_tc(struct bnxt *bp);
void bnxt_shutdown_tc(struct bnxt *bp);
#else /* CONFIG_BNXT_FLOWER_OFFLOAD */
static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
return -EOPNOTSUPP;
}
static inline int bnxt_init_tc(struct bnxt *bp)
{
return 0;
}
static inline void bnxt_shutdown_tc(struct bnxt *bp)
{
}
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
#endif /* BNXT_TC_H */
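Tying the pieces together: a flow parsed into struct bnxt_tc_flow above would ultimately be programmed into the NIC with the HWRM_CFA_FLOW_ALLOC message whose structures appear in the bnxt_hsi.h hunk earlier in this diff. A minimal sketch of that call using the driver's existing HWRM helpers (not part of this diff; the function name is illustrative, only a forward action and a couple of match fields are filled in, and error handling is trimmed):

/* Sketch only: program a simple forwarding flow via HWRM_CFA_FLOW_ALLOC.
 * Field names come from hwrm_cfa_flow_alloc_input/_output in this series;
 * most match fields and all error handling are omitted.
 */
static int bnxt_hwrm_cfa_flow_alloc_sketch(struct bnxt *bp,
					   struct bnxt_tc_flow *flow,
					   __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ethertype = flow->l2_key.ether_type;
	req.action_flags = cpu_to_le16(CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD);
	req.dst_fid = cpu_to_le16(flow->actions.dst_fid);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle; /* used later for free/stats */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}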
......@@ -11,10 +11,12 @@
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#ifdef CONFIG_BNXT_SRIOV
......@@ -113,6 +115,21 @@ bnxt_vf_rep_get_stats64(struct net_device *dev,
stats->tx_bytes = vf_rep->tx_stats.bytes;
}
static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
struct bnxt *bp = vf_rep->bp;
int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
switch (type) {
case TC_SETUP_CLSFLOWER:
return bnxt_tc_setup_flower(bp, vf_fid, type_data);
default:
return -EOPNOTSUPP;
}
}
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
u16 vf_idx;
......@@ -182,6 +199,7 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_stop = bnxt_vf_rep_close,
.ndo_start_xmit = bnxt_vf_rep_xmit,
.ndo_get_stats64 = bnxt_vf_rep_get_stats64,
.ndo_setup_tc = bnxt_vf_rep_setup_tc,
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
......@@ -468,11 +486,11 @@ int bnxt_dl_register(struct bnxt *bp)
return -ENOMEM;
}
bnxt_link_bp_to_dl(dl, bp);
bnxt_link_bp_to_dl(bp, dl);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
bnxt_link_bp_to_dl(dl, NULL);
bnxt_link_bp_to_dl(bp, NULL);
devlink_free(dl);
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
return rc;
......
......@@ -24,13 +24,17 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
static inline void bnxt_link_bp_to_dl(struct devlink *dl, struct bnxt *bp)
/* To clear devlink pointer from bp, pass NULL dl */
static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
{
struct bnxt_dl *bp_dl = devlink_priv(dl);
bp->dl = dl;
bp_dl->bp = bp;
if (bp)
bp->dl = dl;
/* add a back pointer in dl to bp */
if (dl) {
struct bnxt_dl *bp_dl = devlink_priv(dl);
bp_dl->bp = bp;
}
}
int bnxt_dl_register(struct bnxt *bp);
......@@ -41,6 +45,14 @@ void bnxt_vf_reps_open(struct bnxt *bp);
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
struct bnxt *bp = vf_rep->bp;
return bp->pf.vf[vf_rep->vf_idx].fw_fid;
}
#else
static inline int bnxt_dl_register(struct bnxt *bp)
......
......@@ -169,8 +169,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
tc = netdev_get_num_tc(dev);
if (!tc)
tc = 1;
rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
true, tc, tx_xdp);
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
true, tc, tx_xdp);
if (rc) {
netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
return rc;
......