Commit e5c5180a authored by David S. Miller

Merge branch 'nfp-ctrl-vNIC'

Jakub Kicinski says:

====================
nfp: ctrl vNIC

This series adds the ability to use one vNIC as a control channel
for passing messages to and from the application firmware.  The
implementation restructures the existing netdev vNIC code to be able
to deal with nfp_nets whose netdev pointer is set to NULL.  Control vNICs
are not visible to user space (other than for dumping ring state), and
since they don't have netdevs we use a tasklet for RX and a simple skb
list for TX queuing.

Due to the special status of the control vNIC we have to reshuffle the
init code a bit to make sure the control vNIC is fully brought up
(and therefore communication with app FW can happen) before any netdev
or port is visible to user space.

FW designates which vNIC is to be used as the control one
by setting the _pf%u_net_ctrl_bar symbol.  Some FWs depend on metadata
being prepended to control messages, some prefer to look at the queue ID
to decide that something is a control message.  Our implementation
can cater to both (a usage sketch follows the sign-off below).

The first two users of this code will be eBPF maps and flower offloads.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
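For a concrete picture of the API this series adds, here is a minimal, hypothetical sketch of an app sending a control message with the new helpers (nfp_app_ctrl_msg_alloc() and nfp_app_ctrl_tx(), both shown in the diff below). The function name, payload handling, and error codes are invented for illustration, and whether a failed send consumes the skb is not visible in this diff, so treat the error path as a placeholder:

    /* Hypothetical caller: send a small message to the app FW over the
     * ctrl vNIC.  nfp_app_ctrl_msg_alloc() reserves 8 bytes of headroom
     * for the type:5/port:CTRL metadata prepend when ctrl_has_meta is set.
     */
    static int example_send_ctrl_msg(struct nfp_app *app,
                                     const void *data, unsigned int len)
    {
            struct sk_buff *skb;

            skb = nfp_app_ctrl_msg_alloc(app, len);
            if (!skb)
                    return -ENOMEM;

            memcpy(skb_put(skb, len), data, len);   /* copy the payload in */

            if (!nfp_app_ctrl_tx(app, skb))         /* hand off to ctrl vNIC TX */
                    return -EBUSY;
            return 0;
    }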
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "nfpcore/nfp_cpp.h"
@@ -42,6 +43,23 @@ static const struct nfp_app_type *apps[] = {
&app_bpf,
};
struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
{
struct sk_buff *skb;
if (nfp_app_ctrl_has_meta(app))
size += 8;
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
return NULL;
if (nfp_app_ctrl_has_meta(app))
skb_reserve(skb, 8);
return skb;
}
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
...
@@ -37,7 +37,9 @@
struct bpf_prog;
struct net_device;
struct pci_dev;
struct sk_buff;
struct tc_to_netdev;
struct nfp_app;
struct nfp_cpp;
struct nfp_pf;
@@ -55,12 +57,16 @@ extern const struct nfp_app_type app_bpf;
* struct nfp_app_type - application definition
* @id: application ID
* @name: application name
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
* @init: perform basic app checks
* @extra_cap: extra capabilities string
* @vnic_init: init vNICs (assign port types, etc.)
* @vnic_clean: clean up app's vNIC state
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
@@ -69,6 +75,8 @@ struct nfp_app_type {
enum nfp_app_id id;
const char *name;
bool ctrl_has_meta;
int (*init)(struct nfp_app *app);
const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
@@ -77,6 +85,11 @@ struct nfp_app_type {
unsigned int id);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
u32 handle, __be16 proto, struct tc_to_netdev *tc);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
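As an illustration of how an app opts into a control vNIC, a hypothetical app definition could look as follows; app_example, its callback, and the .id value are placeholders (nfp_app_needs_ctrl_vnic(), further down, keys purely off the presence of ctrl_msg_rx):

    /* Hypothetical app type: supplying ctrl_msg_rx is what makes the core
     * allocate and bring up a ctrl vNIC for this app.
     */
    static void example_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
    {
            /* decode and act on the message from app FW, then free it */
            dev_consume_skb_any(skb);
    }

    const struct nfp_app_type app_example = {
            .id             = NFP_APP_CORE_NIC,     /* placeholder ID */
            .name           = "example",
            .ctrl_has_meta  = true, /* FW prepends type:5/port:CTRL meta */
            .ctrl_msg_rx    = example_ctrl_msg_rx,
    };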
@@ -89,6 +102,7 @@ struct nfp_app_type {
* @pdev: backpointer to PCI device
* @pf: backpointer to NFP PF structure
* @cpp: pointer to the CPP handle
* @ctrl: pointer to ctrl vNIC struct
* @type: pointer to const application ops and info
*/
struct nfp_app {
@@ -96,9 +110,13 @@ struct nfp_app {
struct nfp_pf *pf;
struct nfp_cpp *cpp;
struct nfp_net *ctrl;
const struct nfp_app_type *type;
};
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app)
{
if (!app->type->init)
@@ -118,6 +136,21 @@ static inline void nfp_app_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
app->type->vnic_clean(app, nn);
}
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
if (!app->type->start)
return 0;
return app->type->start(app);
}
static inline void nfp_app_stop(struct nfp_app *app)
{
if (!app->type->stop)
return;
app->type->stop(app);
}
static inline const char *nfp_app_name(struct nfp_app *app)
{
if (!app)
@@ -125,6 +158,16 @@ static inline const char *nfp_app_name(struct nfp_app *app)
return app->type->name;
}
static inline bool nfp_app_needs_ctrl_vnic(struct nfp_app *app)
{
return app && app->type->ctrl_msg_rx;
}
static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
{
return app->type->ctrl_has_meta;
}
static inline const char *nfp_app_extra_cap(struct nfp_app *app,
struct nfp_net *nn)
{
@@ -163,6 +206,18 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
return nfp_ctrl_tx(app->ctrl, skb);
}
static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
{
app->type->ctrl_msg_rx(app, skb);
}
struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
...
@@ -63,12 +63,13 @@ struct nfp_nsp_identify;
* @cpp: Pointer to the CPP handle
* @app: Pointer to the APP handle
* @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
* @tx_area: Pointer to the CPP area for the TX queues
* @rx_area: Pointer to the CPP area for the FL/RX queues
* @ctrl_vnic_bar: Pointer to the CPP area for the ctrl vNIC's BAR
* @qc_area: Pointer to the CPP area for the queues
* @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
* @fw_loaded: Is the firmware loaded?
* @ctrl_vnic: Pointer to the control vNIC if available
* @eth_tbl: NSP ETH table
* @nspi: NSP identification info
* @hwmon_dev: pointer to hwmon device
@@ -88,8 +89,8 @@ struct nfp_pf {
struct nfp_app *app;
struct nfp_cpp_area *data_vnic_bar;
struct nfp_cpp_area *tx_area;
struct nfp_cpp_area *rx_area;
struct nfp_cpp_area *ctrl_vnic_bar;
struct nfp_cpp_area *qc_area;
struct msix_entry *irq_entries;
@@ -98,6 +99,8 @@
bool fw_loaded;
struct nfp_net *ctrl_vnic;
struct nfp_eth_table *eth_tbl;
struct nfp_nsp_identify *nspi;
@@ -129,4 +132,6 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id);
void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
#endif /* NFP_MAIN_H */
@@ -50,15 +50,32 @@
#include "nfp_net_ctrl.h"
#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
#define nn_pr(nn, lvl, fmt, args...) \
({ \
struct nfp_net *__nn = (nn); \
\
if (__nn->dp.netdev) \
netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
else \
dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
})
#define nn_err(nn, fmt, args...) nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...) nn_pr(nn, KERN_DEBUG, fmt, ## args)
#define nn_dp_warn(dp, fmt, args...) \
do { \
if (unlikely(net_ratelimit())) \
netdev_warn((dp)->netdev, fmt, ## args); \
} while (0)
({ \
struct nfp_net_dp *__dp = (dp); \
\
if (unlikely(net_ratelimit())) { \
if (__dp->netdev) \
netdev_warn(__dp->netdev, fmt, ## args); \
else \
dev_warn(__dp->dev, fmt, ## args); \
} \
})
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
@@ -388,7 +405,14 @@ struct nfp_net_rx_ring {
*/
struct nfp_net_r_vector {
struct nfp_net *nfp_net;
struct napi_struct napi;
union {
struct napi_struct napi;
struct {
struct tasklet_struct tasklet;
struct sk_buff_head queue;
struct spinlock lock;
};
};
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_rx_ring *rx_ring;
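The union above is what lets control vNICs reuse the ring vector without NAPI. As a rough sketch (assumed, not part of this hunk), the setup for a ctrl vNIC's vector would initialize the non-NAPI members along these lines, with nfp_ctrl_poll standing in for the tasklet handler:

    /* Assumed ctrl vNIC vector setup (illustrative only) */
    tasklet_init(&r_vec->tasklet, nfp_ctrl_poll, (unsigned long)r_vec);
    skb_queue_head_init(&r_vec->queue);
    spin_lock_init(&r_vec->lock);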
@@ -681,6 +705,7 @@ static inline void nn_pci_flush(struct nfp_net *nn)
* either add to a pointer or to read the pointer value.
*/
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
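Since each queue controller slot is NFP_QCP_QUEUE_ADDR_SZ (0x800) bytes, the new NFP_QCP_QUEUE_AREA_SZ of 0x80000 spans 0x80000 / 0x800 = 256 queues, i.e. the whole QC address space; mapping it once (see nfp_net_pci_probe() below) replaces the per-PF TX and RX windows and lets data and ctrl vNICs share a single mapping.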
@@ -788,6 +813,22 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
WARN_ON_ONCE(!nn->dp.netdev && nn->port);
return !!nn->dp.netdev;
}
static inline bool nfp_net_running(struct nfp_net *nn)
{
return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}
static inline const char *nfp_net_name(struct nfp_net *nn)
{
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
/* Globals */
extern const char nfp_driver_version[];
@@ -803,13 +844,16 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev,
nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn);
int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
...
@@ -71,8 +71,11 @@
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_META_PORT_ID_CTRL ~0U
/**
 * Hash type prepended when an RSS hash was computed
 */
...
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
if (!netif_running(nn->dp.netdev))
if (!nfp_net_running(nn))
goto out;
rxd_cnt = rx_ring->cnt;
@@ -138,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
if (!netif_running(nn->dp.netdev))
if (!nfp_net_running(nn))
goto out;
txd_cnt = tx_ring->cnt;
@@ -209,7 +209,10 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
if (IS_ERR_OR_NULL(nfp_dir))
return;
sprintf(name, "vnic%d", id);
if (nfp_net_is_data_vnic(nn))
sprintf(name, "vnic%d", id);
else
strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
if (IS_ERR_OR_NULL(nn->debugfs_dir))
return;
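With the change above a control vNIC gets its own debugfs directory next to the data vNICs, e.g. (path illustrative) /sys/kernel/debug/nfp_net/<pci-dev>/ctrl-vnic/ alongside vnic0, vnic1, and so on.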
...
@@ -223,65 +223,37 @@ static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
NFP_APP_CORE_NIC);
}
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
unsigned int stride, u32 start_off, u32 num_off)
static u8 __iomem *
nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area)
{
unsigned int i, min_qc, max_qc;
min_qc = readl(ctrl_bar + start_off);
max_qc = min_qc;
for (i = 0; i < pf->max_data_vnics; i++) {
/* To make our lives simpler only accept configuration where
* queues are allocated to PFs in order (queues of PFn all have
* indexes lower than PFn+1).
*/
if (max_qc > readl(ctrl_bar + start_off))
return 0;
max_qc = readl(ctrl_bar + start_off);
max_qc += readl(ctrl_bar + num_off) * stride;
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
}
return max_qc - min_qc;
}
static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
const struct nfp_rtsym *ctrl_sym;
u8 __iomem *ctrl_bar;
const struct nfp_rtsym *sym;
char pf_symbol[256];
u8 __iomem *mem;
snprintf(pf_symbol, sizeof(pf_symbol), "_pf%u_net_bar0",
snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp));
ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
if (!ctrl_sym) {
dev_err(&pf->pdev->dev,
"Failed to find PF BAR0 symbol %s\n", pf_symbol);
return NULL;
sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
if (!sym) {
nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
return (u8 __iomem *)ERR_PTR(-ENOENT);
}
if (ctrl_sym->size < pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE) {
dev_err(&pf->pdev->dev,
"PF BAR0 too small to contain %d vNICs\n",
pf->max_data_vnics);
return NULL;
if (sym->size < min_size) {
nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
return (u8 __iomem *)ERR_PTR(-EINVAL);
}
ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
ctrl_sym->domain, ctrl_sym->target,
ctrl_sym->addr, ctrl_sym->size,
&pf->data_vnic_bar);
if (IS_ERR(ctrl_bar)) {
dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
PTR_ERR(ctrl_bar));
return NULL;
mem = nfp_net_map_area(pf->cpp, name, sym->domain, sym->target,
sym->addr, sym->size, area);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to map PF symbol %s: %ld\n",
pf_symbol, PTR_ERR(mem));
return mem;
}
return ctrl_bar;
return mem;
}
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
@@ -294,45 +266,47 @@ static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
struct nfp_net *nn;
struct nfp_net *nn, *next;
while (!list_empty(&pf->vnics)) {
nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list);
nfp_net_pf_free_vnic(pf, nn);
}
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_free_vnic(pf, nn);
}
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar,
int stride, struct nfp_net_fw_version *fw_ver,
unsigned int eth_id)
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
void __iomem *ctrl_bar, void __iomem *qc_bar,
int stride, unsigned int eth_id)
{
u32 n_tx_rings, n_rx_rings;
u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
struct nfp_net *nn;
int err;
tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
nn = nfp_net_alloc(pf->pdev, n_tx_rings, n_rx_rings);
nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
nn->fw_ver = *fw_ver;
nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
nn->rx_bar = rx_bar;
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
err = nfp_app_vnic_init(pf->app, nn, eth_id);
if (err) {
nfp_net_free(nn);
return ERR_PTR(err);
if (needs_netdev) {
err = nfp_app_vnic_init(pf->app, nn, eth_id);
if (err) {
nfp_net_free(nn);
return ERR_PTR(err);
}
}
pf->num_vnics++;
@@ -376,27 +350,15 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar,
int stride, struct nfp_net_fw_version *fw_ver)
void __iomem *qc_bar, int stride)
{
u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
struct nfp_net *nn;
unsigned int i;
int err;
prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
for (i = 0; i < pf->max_data_vnics; i++) {
tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
nn = nfp_net_pf_alloc_vnic(pf, ctrl_bar, tx_bar, rx_bar,
stride, fw_ver, i);
nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
stride, i);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_free_prev;
@@ -430,21 +392,10 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
nfp_app_vnic_clean(pf->app, nn);
}
static int
nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
void __iomem *ctrl_bar, void __iomem *tx_bar,
void __iomem *rx_bar, int stride,
struct nfp_net_fw_version *fw_ver)
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left;
unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
struct nfp_net *nn;
int err;
/* Allocate the vnics and do basic init */
err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, tx_bar, rx_bar,
stride, fw_ver);
if (err)
return err;
/* Get MSI-X vectors */
wanted_irqs = 0;
@@ -452,18 +403,16 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
if (!pf->irq_entries) {
err = -ENOMEM;
goto err_nn_free;
}
if (!pf->irq_entries)
return -ENOMEM;
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
wanted_irqs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -ENOMEM;
goto err_vec_free;
nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
kfree(pf->irq_entries);
return -ENOMEM;
}
/* Distribute IRQs to vNICs */
@@ -472,16 +421,34 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
list_for_each_entry(nn, &pf->vnics, vnic_list) {
unsigned int n;
n = DIV_ROUND_UP(irqs_left, vnics_left);
n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
DIV_ROUND_UP(irqs_left, vnics_left));
nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
n);
irqs_left -= n;
vnics_left--;
}
return 0;
}
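A quick worked example (numbers invented) of the new clamp: with two vNICs each able to use at most 18 vectors but only 10 granted, the loop hands the first vNIC min(18, DIV_ROUND_UP(10, 2)) = 5 and the second min(18, DIV_ROUND_UP(5, 1)) = 5; with 40 granted, each is capped at its 18-vector maximum rather than soaking up the surplus.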
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
nfp_net_irqs_disable(pf->pdev);
kfree(pf->irq_entries);
}
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
struct nfp_net *nn;
unsigned int id;
int err;
/* Finish vNIC init and register */
id = 0;
list_for_each_entry(nn, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
continue;
err = nfp_net_pf_init_vnic(pf, nn, id);
if (err)
goto err_prev_deinit;
@@ -493,17 +460,15 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
err_prev_deinit:
list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
nfp_net_pf_clean_vnic(pf, nn);
nfp_net_irqs_disable(pf->pdev);
err_vec_free:
kfree(pf->irq_entries);
err_nn_free:
nfp_net_pf_free_vnics(pf);
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
return err;
}
static int nfp_net_pf_app_init(struct nfp_pf *pf)
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
u8 __iomem *ctrl_bar;
int err;
pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
@@ -514,8 +479,28 @@ static int nfp_net_pf_app_init(struct nfp_pf *pf)
if (err)
goto err_free;
if (!nfp_app_needs_ctrl_vnic(pf->app))
return 0;
ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
NFP_PF_CSR_SLICE_SIZE,
&pf->ctrl_vnic_bar);
if (IS_ERR(ctrl_bar)) {
err = PTR_ERR(ctrl_bar);
goto err_free;
}
pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
stride, 0);
if (IS_ERR(pf->ctrl_vnic)) {
err = PTR_ERR(pf->ctrl_vnic);
goto err_unmap;
}
return 0;
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_free:
nfp_app_free(pf->app);
return err;
@@ -523,21 +508,79 @@ static int nfp_net_pf_app_init(struct nfp_pf *pf)
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
if (pf->ctrl_vnic) {
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
nfp_app_free(pf->app);
pf->app = NULL;
}
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
int err;
if (!pf->ctrl_vnic)
return 0;
err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
if (err)
return err;
err = nfp_ctrl_open(pf->ctrl_vnic);
if (err)
goto err_clean_ctrl;
return 0;
err_clean_ctrl:
nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
return err;
}
static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
if (!pf->ctrl_vnic)
return;
nfp_ctrl_close(pf->ctrl_vnic);
nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
int err;
err = nfp_net_pf_app_start_ctrl(pf);
if (err)
return err;
err = nfp_app_start(pf->app, pf->ctrl_vnic);
if (err)
goto err_ctrl_stop;
return 0;
err_ctrl_stop:
nfp_net_pf_app_stop_ctrl(pf);
return err;
}
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
nfp_app_stop(pf->app);
nfp_net_pf_app_stop_ctrl(pf);
}
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
nfp_net_pf_app_stop(pf);
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_irqs_disable(pf->pdev);
kfree(pf->irq_entries);
nfp_net_pf_free_irqs(pf);
nfp_net_pf_app_clean(pf);
nfp_cpp_area_release_free(pf->rx_area);
nfp_cpp_area_release_free(pf->tx_area);
nfp_cpp_area_release_free(pf->qc_area);
nfp_cpp_area_release_free(pf->data_vnic_bar);
}
@@ -661,11 +704,9 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
u32 total_tx_qcs, total_rx_qcs;
struct nfp_net_fw_version fw_ver;
u32 tx_area_sz, rx_area_sz;
u32 start_q;
u8 __iomem *ctrl_bar, *qc_bar;
u32 ctrl_bar_sz;
int stride;
int err;
@@ -684,9 +725,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_unlock;
}
ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
if (!ctrl_bar) {
err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
ctrl_bar_sz, &pf->data_vnic_bar);
if (IS_ERR(ctrl_bar)) {
err = PTR_ERR(ctrl_bar);
if (!pf->fw_loaded && err == -ENOENT)
err = -EPROBE_DEFER;
goto err_unlock;
}
@@ -704,7 +749,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
case 1 ... 4:
case 1 ... 5:
stride = 4;
break;
default:
@@ -716,67 +761,54 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
}
}
/* Find how many QC structs need to be mapped */
total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
NFP_NET_CFG_START_TXQ,
NFP_NET_CFG_MAX_TXRINGS);
total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
NFP_NET_CFG_START_RXQ,
NFP_NET_CFG_MAX_RXRINGS);
if (!total_tx_qcs || !total_rx_qcs) {
nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
total_tx_qcs, total_rx_qcs);
err = -EINVAL;
goto err_ctrl_unmap;
}
tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
/* Map TX queues */
start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
NFP_PCIE_QUEUE(start_q),
tx_area_sz, &pf->tx_area);
if (IS_ERR(tx_bar)) {
nfp_err(pf->cpp, "Failed to map TX area.\n");
err = PTR_ERR(tx_bar);
/* Map queues */
qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
&pf->qc_area);
if (IS_ERR(qc_bar)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(qc_bar);
goto err_ctrl_unmap;
}
/* Map RX queues */
start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
NFP_PCIE_QUEUE(start_q),
rx_area_sz, &pf->rx_area);
if (IS_ERR(rx_bar)) {
nfp_err(pf->cpp, "Failed to map RX area.\n");
err = PTR_ERR(rx_bar);
goto err_unmap_tx;
}
err = nfp_net_pf_app_init(pf);
err = nfp_net_pf_app_init(pf, qc_bar, stride);
if (err)
goto err_unmap_rx;
goto err_unmap_qc;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, tx_bar, rx_bar,
stride, &fw_ver);
/* Allocate the vnics and do basic init */
err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
if (err)
goto err_clean_ddir;
err = nfp_net_pf_alloc_irqs(pf);
if (err)
goto err_free_vnics;
err = nfp_net_pf_app_start(pf);
if (err)
goto err_free_irqs;
err = nfp_net_pf_init_vnics(pf);
if (err)
goto err_stop_app;
mutex_unlock(&pf->lock);
return 0;
err_stop_app:
nfp_net_pf_app_stop(pf);
err_free_irqs:
nfp_net_pf_free_irqs(pf);
err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_app_clean(pf);
err_unmap_rx:
nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
nfp_cpp_area_release_free(pf->tx_area);
err_unmap_qc:
nfp_cpp_area_release_free(pf->qc_area);
err_ctrl_unmap:
nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unlock:
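Note the resulting probe order: map the BARs, init the app (which now maps the ctrl BAR and allocates the ctrl vNIC), allocate data vNICs and IRQs, start the app (nfp_net_pf_app_start() above brings the ctrl vNIC up first), and only then init and register the data vNICs. This is the reshuffle the cover letter describes: the control channel is guaranteed to be live before any netdev or port becomes visible to user space.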
@@ -793,7 +825,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
goto out;
list_for_each_entry(nn, &pf->vnics, vnic_list)
nfp_net_pf_clean_vnic(pf, nn);
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
nfp_net_pf_free_vnics(pf);
...
@@ -161,7 +161,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
case 1 ... 4:
case 1 ... 5:
stride = 4;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = tx_bar_no;
@@ -202,7 +202,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
nn = nfp_net_alloc(pdev, max_tx_rings, max_rx_rings);
nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
...