Commit 6e1a2882 authored by David S. Miller

Merge branch 'nfp-TC-block-fixes-app-fallback-and-dev_alloc'

Jakub Kicinski says:

====================
nfp: TC block fixes, app fallback and dev_alloc()

This series has three parts.  First of all John and I fix some
fallout from the TC block conversion.  John also fixes sleeping
in the neigh notifier.

Secondly I reorganise the nfp_app table to make it easier to
deal with excluding apps which have unmet Kconfig dependencies.

Last but not least, after the fixes which went into -net some time
ago, I refactor the page allocation, add an ethtool counter for
failed allocations, and clean up the ethtool stat code while at it.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -27,8 +27,6 @@ nfp-objs := \
nfp_net_sriov.o \
nfp_netvf_main.o \
nfp_port.o \
-bpf/main.o \
-bpf/offload.o \
nic/main.o
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
@@ -44,6 +42,8 @@ endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+bpf/main.o \
+bpf/offload.o \
bpf/verifier.o \
bpf/jit.o
endif
......
@@ -130,6 +130,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
+if (nn->dp.bpf_offload_xdp)
+return -EBUSY;
return nfp_net_bpf_offload(nn, cls_bpf);
default:
return -EOPNOTSUPP;
......
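The new check above makes TC cls_bpf offload and offloaded XDP mutually exclusive on a vNIC: when an XDP program already owns the hardware, the TC request is rejected with -EBUSY instead of silently replacing it. A rough sketch of that callback shape, with hypothetical example_* names standing in for the driver's own types and helpers:

    /* Sketch only: a TC block callback that refuses new offload requests
     * while the hardware is already claimed by another offload.
     * example_priv and example_offload_cls_bpf() are illustrative names,
     * not nfp symbols.
     */
    #include <linux/netdevice.h>
    #include <net/pkt_cls.h>

    struct example_priv {
            bool xdp_offload_active;        /* set while an XDP prog is offloaded */
    };

    static int example_offload_cls_bpf(struct example_priv *priv, void *cls)
    {
            /* program the classifier into hardware here */
            return 0;
    }

    static int example_setup_tc_block_cb(enum tc_setup_type type,
                                         void *type_data, void *cb_priv)
    {
            struct example_priv *priv = cb_priv;

            switch (type) {
            case TC_SETUP_CLSBPF:
                    if (priv->xdp_offload_active)
                            return -EBUSY;  /* XDP already owns the offload */
                    return example_offload_cls_bpf(priv, type_data);
            default:
                    return -EOPNOTSUPP;
            }
    }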
@@ -150,9 +150,6 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
unsigned int max_mtu;
int ret;
-if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
-return -EOPNOTSUPP;
ret = nfp_net_bpf_get_act(nn, cls_bpf);
if (ret < 0)
return ret;
......
@@ -50,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
-enum nfp_flower_cmsg_type_port type)
+enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
struct nfp_flower_cmsg_hdr *ch;
struct sk_buff *skb;
size += NFP_FLOWER_CMSG_HLEN;
-skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+skb = nfp_app_ctrl_msg_alloc(app, size, flag);
if (!skb)
return NULL;
@@ -78,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
unsigned int size;
size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
-skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
+skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
+GFP_KERNEL);
if (!skb)
return NULL;
@@ -109,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
struct sk_buff *skb;
skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
-NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
if (!skb)
return -ENOMEM;
......
@@ -458,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
-enum nfp_flower_cmsg_type_port type);
+enum nfp_flower_cmsg_type_port type, gfp_t flag);
#endif
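The extra gfp_t parameter lets each caller of nfp_flower_cmsg_alloc() state whether its context may sleep: process-context paths keep GFP_KERNEL, while the atomic neighbour-notifier path further down passes GFP_ATOMIC. A minimal sketch of this plumbing pattern, using hypothetical example_* helpers rather than the real nfp API:

    /* Sketch: thread a gfp_t argument through an allocation helper so the
     * caller's context decides whether the allocation may sleep.
     * example_msg_alloc() and example_send() are illustrative names only.
     */
    #include <linux/skbuff.h>

    static struct sk_buff *example_msg_alloc(unsigned int size, gfp_t flags)
    {
            return alloc_skb(size, flags);  /* will not sleep for GFP_ATOMIC */
    }

    static int example_send(unsigned int size, gfp_t flags)
    {
            struct sk_buff *skb = example_msg_alloc(size, flags);

            if (!skb)
                    return -ENOMEM;
            /* ... build the message ... */
            kfree_skb(skb);         /* a real sender would queue it instead */
            return 0;
    }

A process-context caller would pass GFP_KERNEL here; a notifier or other softirq-context caller would pass GFP_ATOMIC.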
@@ -115,7 +115,7 @@ struct nfp_flower_priv {
struct mutex nfp_mac_off_lock;
struct mutex nfp_mac_index_lock;
struct mutex nfp_ipv4_off_lock;
-struct mutex nfp_neigh_off_lock;
+spinlock_t nfp_neigh_off_lock;
struct ida nfp_mac_off_ids;
int nfp_mac_off_count;
struct notifier_block nfp_tun_mac_nb;
......
@@ -95,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
-skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -468,14 +468,14 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
-struct nfp_net *nn = cb_priv;
+struct nfp_repr *repr = cb_priv;
-if (!tc_can_offload(nn->dp.netdev))
+if (!tc_can_offload(repr->netdev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
-return nfp_flower_repr_offload(nn->app, nn->port->netdev,
+return nfp_flower_repr_offload(repr->app, repr->netdev,
type_data);
default:
return -EOPNOTSUPP;
@@ -485,7 +485,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
static int nfp_flower_setup_tc_block(struct net_device *netdev,
struct tc_block_offload *f)
{
-struct nfp_net *nn = netdev_priv(netdev);
+struct nfp_repr *repr = netdev_priv(netdev);
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
@@ -494,11 +494,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_flower_setup_tc_block_cb,
-nn, nn);
+repr, repr);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_flower_setup_tc_block_cb,
-nn);
+repr);
return 0;
default:
return -EOPNOTSUPP;
......
@@ -224,12 +224,13 @@ static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
}
static int
-nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata)
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
+gfp_t flag)
{
struct sk_buff *skb;
unsigned char *msg;
-skb = nfp_flower_cmsg_alloc(app, plen, mtype);
+skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
if (!skb)
return -ENOMEM;
@@ -246,15 +247,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
-mutex_lock(&priv->nfp_neigh_off_lock);
+spin_lock_bh(&priv->nfp_neigh_off_lock);
list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
-mutex_unlock(&priv->nfp_neigh_off_lock);
+spin_unlock_bh(&priv->nfp_neigh_off_lock);
return true;
}
}
-mutex_unlock(&priv->nfp_neigh_off_lock);
+spin_unlock_bh(&priv->nfp_neigh_off_lock);
return false;
}
@@ -264,24 +265,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
-mutex_lock(&priv->nfp_neigh_off_lock);
+spin_lock_bh(&priv->nfp_neigh_off_lock);
list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
-mutex_unlock(&priv->nfp_neigh_off_lock);
+spin_unlock_bh(&priv->nfp_neigh_off_lock);
return;
}
}
-entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
-mutex_unlock(&priv->nfp_neigh_off_lock);
+spin_unlock_bh(&priv->nfp_neigh_off_lock);
nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
return;
}
entry->ipv4_addr = ipv4_addr;
list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
-mutex_unlock(&priv->nfp_neigh_off_lock);
+spin_unlock_bh(&priv->nfp_neigh_off_lock);
}
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
@@ -290,7 +291,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
-mutex_lock(&priv->nfp_neigh_off_lock);
+spin_lock_bh(&priv->nfp_neigh_off_lock);
list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
@@ -299,12 +300,12 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
break;
}
}
-mutex_unlock(&priv->nfp_neigh_off_lock);
+spin_unlock_bh(&priv->nfp_neigh_off_lock);
}
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
-struct flowi4 *flow, struct neighbour *neigh)
+struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
@@ -334,7 +335,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
send_msg:
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
sizeof(struct nfp_tun_neigh),
-(unsigned char *)&payload);
+(unsigned char *)&payload, flag);
}
static int
@@ -385,7 +386,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
#endif
flow.flowi4_proto = IPPROTO_UDP;
-nfp_tun_write_neigh(n->dev, app, &flow, n);
+nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
return NOTIFY_OK;
}
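The netevent notifier can run in softirq/atomic context, which is why the route-cache lock becomes a spinlock taken with spin_lock_bh() and why this path hands GFP_ATOMIC down to the allocation helpers: neither a mutex nor a sleeping allocation is allowed there. A hedged sketch of that non-sleeping pattern, with hypothetical names rather than the driver's own structures:

    /* Sketch: caching an address from a context that may not sleep.  The
     * spinlock plus GFP_ATOMIC keep the whole path non-sleeping.  The
     * addr_entry/addr_cache names are illustrative, not nfp symbols.
     */
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct addr_entry {
            __be32 addr;
            struct list_head list;
    };

    static LIST_HEAD(addr_cache);
    static DEFINE_SPINLOCK(addr_cache_lock);

    static void addr_cache_add(__be32 addr)
    {
            struct addr_entry *entry;

            entry = kmalloc(sizeof(*entry), GFP_ATOMIC);    /* no sleeping */
            if (!entry)
                    return;

            entry->addr = addr;

            spin_lock_bh(&addr_cache_lock);
            list_add_tail(&entry->list, &addr_cache);
            spin_unlock_bh(&addr_cache_lock);
    }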
@@ -423,7 +424,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto route_fail_warning;
-nfp_tun_write_neigh(n->dev, app, &flow, n);
+nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
neigh_release(n);
return;
@@ -456,7 +457,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
sizeof(struct nfp_tun_ipv4_addr),
-&payload);
+&payload, GFP_KERNEL);
}
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
@@ -548,7 +549,7 @@ void nfp_tunnel_write_macs(struct nfp_app *app)
}
err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
-pay_size, payload);
+pay_size, payload, GFP_KERNEL);
kfree(payload);
@@ -729,7 +730,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
/* Initialise priv data for neighbour offloading. */
-mutex_init(&priv->nfp_neigh_off_lock);
+spin_lock_init(&priv->nfp_neigh_off_lock);
INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
@@ -769,43 +770,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
/* Free any memory that may be occupied by MAC list. */
-mutex_lock(&priv->nfp_mac_off_lock);
list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
list);
list_del(&mac_entry->list);
kfree(mac_entry);
}
-mutex_unlock(&priv->nfp_mac_off_lock);
/* Free any memory that may be occupied by MAC index list. */
-mutex_lock(&priv->nfp_mac_index_lock);
list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
list);
list_del(&mac_idx->list);
kfree(mac_idx);
}
-mutex_unlock(&priv->nfp_mac_index_lock);
ida_destroy(&priv->nfp_mac_off_ids);
/* Free any memory that may be occupied by ipv4 list. */
-mutex_lock(&priv->nfp_ipv4_off_lock);
list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
list_del(&ip_entry->list);
kfree(ip_entry);
}
-mutex_unlock(&priv->nfp_ipv4_off_lock);
/* Free any memory that may be occupied by the route list. */
-mutex_lock(&priv->nfp_neigh_off_lock);
list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
list);
list_del(&route_entry->list);
kfree(route_entry);
}
-mutex_unlock(&priv->nfp_neigh_off_lock);
}
@@ -43,10 +43,14 @@
#include "nfp_net_repr.h"
static const struct nfp_app_type *apps[] = {
-&app_nic,
-&app_bpf,
+[NFP_APP_CORE_NIC] = &app_nic,
+#ifdef CONFIG_BPF_SYSCALL
+[NFP_APP_BPF_NIC] = &app_bpf,
+#else
+[NFP_APP_BPF_NIC] = &app_nic,
+#endif
#ifdef CONFIG_NFP_APP_FLOWER
-&app_flower,
+[NFP_APP_FLOWER_NIC] = &app_flower,
#endif
};
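Indexing the table by app ID with designated initializers lets the driver substitute a fallback entry when a dependency is compiled out (here the BPF slot falls back to the plain NIC app when CONFIG_BPF_SYSCALL is off), or leave a slot empty and reject it at lookup time, which is what the nfp_app_alloc() hunk below does. A simplified sketch of that table-plus-lookup pattern, with hypothetical example_* names:

    /* Sketch: a sparse, ID-indexed table with a bounds and NULL check on
     * lookup.  example_type, example_nic, example_flower and
     * example_lookup() are illustrative; ARRAY_SIZE() is from
     * <linux/kernel.h>.
     */
    #include <linux/kernel.h>

    struct example_type {
            const char *name;
    };

    static const struct example_type example_nic    = { .name = "core nic" };
    static const struct example_type example_flower = { .name = "flower" };

    static const struct example_type *example_table[] = {
            [0] = &example_nic,
            [2] = &example_flower,
            /* index 1 left NULL, e.g. its Kconfig dependency is disabled */
    };

    static const struct example_type *example_lookup(unsigned int id)
    {
            if (id >= ARRAY_SIZE(example_table) || !example_table[id])
                    return NULL;    /* unknown ID or app not built in */
            return example_table[id];
    }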
@@ -116,17 +120,13 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
-unsigned int i;
-for (i = 0; i < ARRAY_SIZE(apps); i++)
-if (apps[i]->id == id)
-break;
-if (i == ARRAY_SIZE(apps)) {
+if (id >= ARRAY_SIZE(apps) || !apps[id]) {
nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
return ERR_PTR(-EINVAL);
}
-if (WARN_ON(!apps[i]->name || !apps[i]->vnic_alloc))
+if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
return ERR_PTR(-EINVAL);
app = kzalloc(sizeof(*app), GFP_KERNEL);
@@ -136,7 +136,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
app->pf = pf;
app->cpp = pf->cpp;
app->pdev = pf->pdev;
-app->type = apps[i];
+app->type = apps[id];
return app;
}
......
@@ -394,6 +394,7 @@ struct nfp_net_rx_ring {
* @tx_lso: Counter of LSO packets sent
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
+* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
* @irq_vector: Interrupt vector number (use for talking to the OS)
* @handler: Interrupt handler for this ring vector
* @name: Name of the interrupt vector
@@ -437,6 +438,8 @@ struct nfp_net_r_vector {
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+u64 rx_replace_buf_alloc_fail;
u64 tx_errors;
u64 tx_busy;
......
@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
if (!dp->xdp_prog) {
frag = napi_alloc_frag(dp->fl_bufsz);
+if (unlikely(!frag))
+return NULL;
} else {
struct page *page;
-page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-frag = page ? page_address(page) : NULL;
-}
-if (!frag) {
-nn_dp_warn(dp, "Failed to alloc receive page frag\n");
-return NULL;
+page = dev_alloc_page();
+if (unlikely(!page))
+return NULL;
+frag = page_address(page);
}
*dma_addr = nfp_net_dma_map_rx(dp, frag);
@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
{
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++;
+/* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+if (skb && rxbuf)
+r_vec->rx_replace_buf_alloc_fail++;
u64_stats_update_end(&r_vec->rx_sync);
/* skb is build based on the frag, free_skb() would free the frag
......
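Counters like rx_replace_buf_alloc_fail are bumped inside the ring vector's u64_stats_update_begin()/u64_stats_update_end() pair so that 32-bit readers never see a torn 64-bit value, and the ethtool code below reads them back with the matching fetch_begin/fetch_retry loop. A minimal sketch of that writer/reader pairing, with a hypothetical example_ring structure:

    /* Sketch: the u64_stats writer/reader pairing for a per-ring counter.
     * example_ring is illustrative; the helpers are the standard
     * <linux/u64_stats_sync.h> API.
     */
    #include <linux/u64_stats_sync.h>

    struct example_ring {
            struct u64_stats_sync sync;
            u64 alloc_fail;
    };

    static void example_count_alloc_fail(struct example_ring *ring)
    {
            u64_stats_update_begin(&ring->sync);
            ring->alloc_fail++;
            u64_stats_update_end(&ring->sync);
    }

    static u64 example_read_alloc_fail(struct example_ring *ring)
    {
            unsigned int start;
            u64 val;

            do {
                    start = u64_stats_fetch_begin(&ring->sync);
                    val = ring->alloc_fail;
            } while (u64_stats_fetch_retry(&ring->sync, start));

            return val;
    }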
@@ -181,7 +181,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_ET_RVEC_GATHER_STATS 7
+#define NN_RVEC_GATHER_STATS 8
+#define NN_RVEC_PER_Q_STATS 3
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
@@ -427,7 +428,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
-return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
+return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -444,6 +445,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_ok");
data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
data = nfp_pr_et(data, "hw_rx_csum_err");
+data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
@@ -454,9 +456,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
-u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
+u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
struct nfp_net *nn = netdev_priv(netdev);
-u64 tmp[NN_ET_RVEC_GATHER_STATS];
+u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
for (i = 0; i < nn->dp.num_r_vecs; i++) {
@@ -468,25 +470,26 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
+tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
-tmp[3] = nn->r_vecs[i].hw_csum_tx;
-tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
-tmp[5] = nn->r_vecs[i].tx_gather;
-tmp[6] = nn->r_vecs[i].tx_lso;
+tmp[4] = nn->r_vecs[i].hw_csum_tx;
+tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
+tmp[6] = nn->r_vecs[i].tx_gather;
+tmp[7] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
-data += 3;
+data += NN_RVEC_PER_Q_STATS;
-for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
gathered_stats[j] += tmp[j];
}
-for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
return data;
......
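The refactor keeps two counts in named defines: NN_RVEC_PER_Q_STATS values are written straight into the output array for every ring vector, while NN_RVEC_GATHER_STATS values are summed across rings into a temporary array and appended once at the end, so the string order and the value order stay in lockstep. A stripped-down sketch of that layout, with hypothetical example_* names:

    /* Sketch: per-ring values go straight into data[], gathered values are
     * summed and appended once, so these defines must match the order used
     * by the strings callback.  Names are illustrative, not the driver's.
     */
    #include <linux/types.h>

    #define EX_PER_Q_STATS    2
    #define EX_GATHER_STATS   1

    struct example_ring {
            u64 rx_pkts;
            u64 tx_pkts;
            u64 alloc_fail;
    };

    static u64 *example_fill_stats(const struct example_ring *rings,
                                   unsigned int num_rings, u64 *data)
    {
            u64 gathered[EX_GATHER_STATS] = {};
            unsigned int i;

            for (i = 0; i < num_rings; i++) {
                    data[0] = rings[i].rx_pkts;     /* per-ring slots */
                    data[1] = rings[i].tx_pkts;
                    data += EX_PER_Q_STATS;

                    gathered[0] += rings[i].alloc_fail;
            }

            for (i = 0; i < EX_GATHER_STATS; i++)   /* appended once */
                    *data++ = gathered[i];

            return data;
    }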