Commit c01d0efa authored by Pieter Jansen van Vuuren, committed by David S. Miller

nfp: flower: use rhashtable for flow caching

Make use of relativistic hash tables for tracking flows instead
of fixed sized hash tables.
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 7579d84b
......@@ -509,11 +509,6 @@ static int nfp_bpf_init(struct nfp_app *app)
return err;
}
static void nfp_check_rhashtable_empty(void *ptr, void *arg)
{
WARN_ON_ONCE(1);
}
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
......
......@@ -38,6 +38,7 @@
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
......@@ -53,7 +54,6 @@ struct nfp_app;
#define NFP_FL_STATS_ENTRY_RS BIT(20)
#define NFP_FL_STATS_ELEM_RS 4
#define NFP_FL_REPEATED_HASH_MAX BIT(17)
#define NFP_FLOWER_HASH_BITS 19
#define NFP_FLOWER_MASK_ENTRY_RS 256
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
......@@ -171,7 +171,7 @@ struct nfp_flower_priv {
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct rhashtable flow_table;
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
......@@ -227,7 +227,7 @@ struct nfp_fl_stats {
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
struct hlist_node link;
struct rhash_head fl_node;
struct rcu_head rcu;
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
......@@ -239,6 +239,8 @@ struct nfp_fl_payload {
bool ingress_offload;
};
extern const struct rhashtable_params nfp_flower_table_params;
struct nfp_fl_stats_frame {
__be32 stats_con_id;
__be32 pkt_count;
......
......@@ -48,6 +48,12 @@ struct nfp_mask_id_table {
u8 mask_id;
};
struct nfp_fl_flow_table_cmp_arg {
struct net_device *netdev;
unsigned long cookie;
__be32 host_ctx;
};
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
......@@ -102,18 +108,15 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx)
{
struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flower_entry;
hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
tc_flower_cookie)
if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
(!netdev || flower_entry->ingress_dev == netdev) &&
(host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
flower_entry->meta.host_ctx_id == host_ctx))
return flower_entry;
flower_cmp_arg.netdev = netdev;
flower_cmp_arg.cookie = tc_flower_cookie;
flower_cmp_arg.host_ctx = host_ctx;
return NULL;
return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
nfp_flower_table_params);
}
static void
......@@ -389,12 +392,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
return nfp_release_stats_entry(app, temp_ctx_id);
}
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const void *obj)
{
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
const struct nfp_fl_payload *flow_entry = obj;
if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
(cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
return flow_entry->tc_flower_cookie != cmp_arg->cookie;
return 1;
}
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
const struct nfp_fl_payload *flower_entry = data;
return jhash2((u32 *)&flower_entry->tc_flower_cookie,
sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
seed);
}
static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;
return jhash2((u32 *)&cmp_arg->cookie,
sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}
const struct rhashtable_params nfp_flower_table_params = {
.head_offset = offsetof(struct nfp_fl_payload, fl_node),
.hashfn = nfp_fl_key_hashfn,
.obj_cmpfn = nfp_fl_obj_cmpfn,
.obj_hashfn = nfp_fl_obj_hashfn,
.automatic_shrinking = true,
};
int nfp_flower_metadata_init(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
int err;
hash_init(priv->mask_table);
hash_init(priv->flow_table);
err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
if (err)
return err;
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
......@@ -402,7 +449,7 @@ int nfp_flower_metadata_init(struct nfp_app *app)
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
return -ENOMEM;
goto err_free_flow_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
......@@ -428,6 +475,8 @@ int nfp_flower_metadata_init(struct nfp_app *app)
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
}
......@@ -438,6 +487,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
if (!priv)
return;
rhashtable_free_and_destroy(&priv->flow_table,
nfp_check_rhashtable_empty, NULL);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
vfree(priv->stats_ids.free_list.buf);
......
......@@ -513,9 +513,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
INIT_HLIST_NODE(&flow_pay->link);
flow_pay->tc_flower_cookie = flow->cookie;
hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
nfp_flower_table_params);
if (err)
goto err_destroy_flow;
port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
......@@ -550,6 +553,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
int err;
......@@ -573,11 +577,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_flow;
err_free_flow:
hash_del_rcu(&nfp_flow->link);
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
&nfp_flow->fl_node,
nfp_flower_table_params));
kfree_rcu(nfp_flow, rcu);
return err;
}
......
......@@ -60,6 +60,11 @@ static const struct nfp_app_type *apps[] = {
#endif
};
void nfp_check_rhashtable_empty(void *ptr, void *arg)
{
WARN_ON_ONCE(1);
}
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
{
if (nfp_netdev_is_nfp_net(netdev)) {
......
......@@ -196,6 +196,7 @@ struct nfp_app {
void *priv;
};
void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册