Commit 552a2a3f authored by David S. Miller

Merge branch 'nfp-flower-ct-offload'

Simon Horman says:

====================
nfp: flower: conntrack offload

Louis Peens says:

This series takes the preparation from the previous two series
and finally creates the structures and control messages
to offload the conntrack flows to the card. First we
do a bit of refactoring in the existing functions
to make them reusable for the conntrack implementation,
after which the control messages are compiled and
transmitted to the card. Lastly we add stats handling
for the conntrack flows.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
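
The central refactor in the diff below changes the flower compile helpers to take a struct flow_rule * instead of the TC wrapper struct flow_cls_offload *. Conntrack offload has to compile flows that are stitched together from several TC rules, so there is no single flow_cls_offload to pass around. A minimal sketch of the pattern, with hypothetical example_* names (flow_cls_offload_flow_rule() and flow_rule_match_key() are the real kernel APIs):

#include <linux/errno.h>
#include <net/flow_offload.h>

/* Helpers now take the flow_rule directly, so conntrack code can hand
 * in a rule it assembled itself, with no TC wrapper around it.
 */
static int example_compile(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ? 0 : -EOPNOTSUPP;
}

/* The TC entry point still receives the wrapper and unpacks it once. */
static int example_add_offload(struct flow_cls_offload *flow)
{
	return example_compile(flow_cls_offload_flow_rule(flow));
}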
......@@ -262,10 +262,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
}
static bool
nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{
struct flow_action_entry *act = flow->rule->action.entries;
int num_act = flow->rule->action.num_entries;
struct flow_action_entry *act = rule->action.entries;
int num_act = rule->action.num_entries;
int act_idx;
/* Preparse action list for next mirred or redirect action */
......@@ -279,7 +279,7 @@ nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
struct flow_cls_offload *flow,
struct flow_rule *rule,
const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
......@@ -288,7 +288,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app,
/* Determine the tunnel type based on the egress netdev
* in the mirred action for tunnels without l4.
*/
if (nfp_flower_tun_is_gre(flow, act_idx))
if (nfp_flower_tun_is_gre(rule, act_idx))
return NFP_FL_TUNNEL_GRE;
switch (tun->key.tp_dst) {
......@@ -788,11 +788,10 @@ struct nfp_flower_pedit_acts {
};
static int
nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
int *a_len, struct nfp_flower_pedit_acts *set_act,
u32 *csum_updated)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
......@@ -890,7 +889,7 @@ nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
static int
nfp_fl_pedit(const struct flow_action_entry *act,
struct flow_cls_offload *flow, char *nfp_action, int *a_len,
char *nfp_action, int *a_len,
u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
struct netlink_ext_ack *extack)
{
......@@ -977,7 +976,7 @@ nfp_flower_output_action(struct nfp_app *app,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct flow_cls_offload *flow,
struct flow_rule *rule,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
......@@ -1045,7 +1044,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
*tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
*tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
if (*tun_type == NFP_FL_TUNNEL_NONE) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
......@@ -1086,7 +1085,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
/* Tunnel decap is handled by default so accept action. */
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
a_len, csum_updated, set_act, extack))
return -EOPNOTSUPP;
break;
......@@ -1195,7 +1194,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
}
int nfp_flower_compile_action(struct nfp_app *app,
struct flow_cls_offload *flow,
struct flow_rule *rule,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
struct netlink_ext_ack *extack)
......@@ -1207,7 +1206,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
bool pkt_host = false;
u32 csum_updated = 0;
if (!flow_action_hw_stats_check(&flow->rule->action, extack,
if (!flow_action_hw_stats_check(&rule->action, extack,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
......@@ -1219,18 +1218,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
flow_action_for_each(i, act, &flow->rule->action) {
if (nfp_fl_check_mangle_start(&flow->rule->action, i))
flow_action_for_each(i, act, &rule->action) {
if (nfp_fl_check_mangle_start(&rule->action, i))
memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
&set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
if (nfp_fl_check_mangle_end(&flow->rule->action, i))
nfp_fl_commit_mangle(flow,
if (nfp_fl_check_mangle_end(&rule->action, i))
nfp_fl_commit_mangle(rule,
&nfp_flow->action_data[act_len],
&act_len, &set_act, &csum_updated);
}
......
......@@ -83,6 +83,24 @@ enum ct_entry_type {
CT_TYPE_PRE_CT,
CT_TYPE_NFT,
CT_TYPE_POST_CT,
_CT_TYPE_MAX,
};
enum nfp_nfp_layer_name {
FLOW_PAY_META_TCI = 0,
FLOW_PAY_INPORT,
FLOW_PAY_EXT_META,
FLOW_PAY_MAC_MPLS,
FLOW_PAY_L4,
FLOW_PAY_IPV4,
FLOW_PAY_IPV6,
FLOW_PAY_CT,
FLOW_PAY_GRE,
FLOW_PAY_QINQ,
FLOW_PAY_UDP_TUN,
FLOW_PAY_GENEVE_OPT,
_FLOW_PAY_LAYERS_MAX
};
/**
......@@ -228,4 +246,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
*/
int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
void *cb_priv);
/**
* nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows
* @flow: TC flower classifier offload structure.
* @ct_map_ent: ct map entry for the flow whose stats need updating
*/
int nfp_fl_ct_stats(struct flow_cls_offload *flow,
struct nfp_fl_ct_map_entry *ct_map_ent);
#endif
......@@ -413,20 +413,73 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2);
void
nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk, u8 key_type);
void
nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
struct flow_rule *rule);
void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext);
int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
bool mask_version, enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack);
void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
struct flow_rule *rule);
int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
struct flow_rule *rule,
struct netlink_ext_ack *extack);
void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
struct flow_rule *rule);
void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
struct nfp_flower_vlan *msk,
struct flow_rule *rule);
void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule);
void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk, struct flow_rule *rule);
void
nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule);
void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
struct nfp_flower_ipv4_gre_tun *msk,
struct flow_rule *rule);
void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
struct flow_rule *rule);
void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
struct nfp_flower_ipv6_udp_tun *msk,
struct flow_rule *rule);
void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
struct nfp_flower_ipv6_gre_tun *msk,
struct flow_rule *rule);
int nfp_flower_compile_flow_match(struct nfp_app *app,
struct flow_cls_offload *flow,
struct flow_rule *rule,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
struct flow_cls_offload *flow,
struct flow_rule *rule,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
struct netlink_ext_ack *extack);
int nfp_compile_flow_metadata(struct nfp_app *app,
struct flow_cls_offload *flow,
int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev,
struct netlink_ext_ack *extack);
......@@ -498,4 +551,22 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer);
int nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
struct flow_rule *flow,
enum nfp_flower_tun_type *tun_type,
struct netlink_ext_ack *extack);
void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow);
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype);
void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow);
#endif
......@@ -7,51 +7,68 @@
#include "cmsg.h"
#include "main.h"
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
struct flow_rule *rule, u8 key_type, bool qinq_sup)
void
nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk, u8 key_type)
{
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
/* Populate the metadata frame. */
ext->nfp_flow_key_layer = key_type;
ext->mask_id = ~0;
msk->nfp_flow_key_layer = key_type;
msk->mask_id = ~0;
}
if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
void
nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
struct flow_rule *rule)
{
u16 msk_tci, key_tci;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
match.key->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
match.key->vlan_id);
ext->tci = cpu_to_be16(tmp_tci);
tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
match.mask->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
match.mask->vlan_id);
msk->tci = cpu_to_be16(tmp_tci);
ext->tci |= cpu_to_be16((key_tci & msk_tci));
msk->tci |= cpu_to_be16(msk_tci);
}
}
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
nfp_flower_compile_meta(ext, msk, key_type);
if (!qinq_sup)
nfp_flower_compile_tci(ext, msk, rule);
}
void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
static int
int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
bool mask_version, enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
......@@ -74,28 +91,37 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
static int
void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
struct netlink_ext_ack *extack)
struct nfp_flower_mac_mpls *msk,
struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
int i;
flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
ether_addr_copy(ext->mac_src, &match.key->src[0]);
ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
ether_addr_copy(msk->mac_src, &match.mask->src[0]);
for (i = 0; i < ETH_ALEN; i++) {
ext->mac_dst[i] |= match.key->dst[i] &
match.mask->dst[i];
msk->mac_dst[i] |= match.mask->dst[i];
ext->mac_src[i] |= match.key->src[i] &
match.mask->src[i];
msk->mac_src[i] |= match.mask->src[i];
}
}
}
int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
struct flow_rule *rule,
struct netlink_ext_ack *extack)
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
u32 t_mpls;
u32 key_mpls, msk_mpls;
flow_rule_match_mpls(rule, &match);
......@@ -106,22 +132,24 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
return -EOPNOTSUPP;
}
t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
match.key->ls[0].mpls_label) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
match.key->ls[0].mpls_tc) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
match.key->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
ext->mpls_lse = cpu_to_be32(t_mpls);
t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
match.mask->ls[0].mpls_label) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
match.mask->ls[0].mpls_tc) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
match.mask->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
msk->mpls_lse = cpu_to_be32(t_mpls);
key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
match.key->ls[0].mpls_label) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
match.key->ls[0].mpls_tc) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
match.key->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
match.mask->ls[0].mpls_label) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
match.mask->ls[0].mpls_tc) |
FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
match.mask->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
msk->mpls_lse |= cpu_to_be32(msk_mpls);
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
* bit, which indicates an mpls ether type but without any
......@@ -132,30 +160,41 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
flow_rule_match_basic(rule, &match);
if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
return 0;
}
static void
static int
nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
struct flow_rule *rule,
struct netlink_ext_ack *extack)
{
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
nfp_flower_compile_mac(ext, msk, rule);
return nfp_flower_compile_mpls(ext, msk, rule, extack);
}
void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
ext->port_src = match.key->src;
ext->port_dst = match.key->dst;
msk->port_src = match.mask->src;
msk->port_dst = match.mask->dst;
ext->port_src |= match.key->src & match.mask->src;
ext->port_dst |= match.key->dst & match.mask->dst;
msk->port_src |= match.mask->src;
msk->port_dst |= match.mask->dst;
}
}
......@@ -167,18 +206,18 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
ext->proto = match.key->ip_proto;
msk->proto = match.mask->ip_proto;
ext->proto |= match.key->ip_proto & match.mask->ip_proto;
msk->proto |= match.mask->ip_proto;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
ext->tos = match.key->tos;
ext->ttl = match.key->ttl;
msk->tos = match.mask->tos;
msk->ttl = match.mask->ttl;
ext->tos |= match.key->tos & match.mask->tos;
ext->ttl |= match.key->ttl & match.mask->ttl;
msk->tos |= match.mask->tos;
msk->ttl |= match.mask->ttl;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
......@@ -231,99 +270,108 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
}
static void
nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
struct nfp_flower_vlan *frame,
bool outer_vlan)
nfp_flower_fill_vlan(struct flow_match_vlan *match,
struct nfp_flower_vlan *ext,
struct nfp_flower_vlan *msk, bool outer_vlan)
{
u16 tci;
tci = NFP_FLOWER_MASK_VLAN_PRESENT;
tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
key->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
key->vlan_id);
struct flow_dissector_key_vlan *mask = match->mask;
struct flow_dissector_key_vlan *key = match->key;
u16 msk_tci, key_tci;
key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
key->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
key->vlan_id);
msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
mask->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
mask->vlan_id);
if (outer_vlan) {
frame->outer_tci = cpu_to_be16(tci);
frame->outer_tpid = key->vlan_tpid;
ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
msk->outer_tci |= cpu_to_be16(msk_tci);
msk->outer_tpid |= mask->vlan_tpid;
} else {
frame->inner_tci = cpu_to_be16(tci);
frame->inner_tpid = key->vlan_tpid;
ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
msk->inner_tci |= cpu_to_be16(msk_tci);
msk->inner_tpid |= mask->vlan_tpid;
}
}
static void
void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
struct nfp_flower_vlan *msk,
struct flow_rule *rule)
{
struct flow_match_vlan match;
memset(ext, 0, sizeof(struct nfp_flower_vlan));
memset(msk, 0, sizeof(struct nfp_flower_vlan));
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
flow_rule_match_vlan(rule, &match);
nfp_flower_fill_vlan(match.key, ext, true);
nfp_flower_fill_vlan(match.mask, msk, true);
nfp_flower_fill_vlan(&match, ext, msk, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
flow_rule_match_cvlan(rule, &match);
nfp_flower_fill_vlan(match.key, ext, false);
nfp_flower_fill_vlan(match.mask, msk, false);
nfp_flower_fill_vlan(&match, ext, msk, false);
}
}
static void
void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
memset(msk, 0, sizeof(struct nfp_flower_ipv4));
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
ext->ipv4_src = match.key->src;
ext->ipv4_dst = match.key->dst;
msk->ipv4_src = match.mask->src;
msk->ipv4_dst = match.mask->dst;
ext->ipv4_src |= match.key->src & match.mask->src;
ext->ipv4_dst |= match.key->dst & match.mask->dst;
msk->ipv4_src |= match.mask->src;
msk->ipv4_dst |= match.mask->dst;
}
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static void
void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
int i;
flow_rule_match_ipv6_addrs(rule, &match);
ext->ipv6_src = match.key->src;
ext->ipv6_dst = match.key->dst;
msk->ipv6_src = match.mask->src;
msk->ipv6_dst = match.mask->dst;
for (i = 0; i < sizeof(ext->ipv6_src); i++) {
ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
match.mask->src.s6_addr[i];
ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
match.mask->dst.s6_addr[i];
msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
}
}
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
void
nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
int i;
flow_rule_match_enc_opts(rule, &match);
memcpy(ext, match.key->data, match.key->len);
memcpy(msk, match.mask->data, match.mask->len);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
flow_rule_match_enc_opts(rule, &match);
return 0;
for (i = 0; i < match.mask->len; i++) {
ext[i] |= match.key->data[i] & match.mask->data[i];
msk[i] |= match.mask->data[i];
}
}
}
static void
......@@ -335,10 +383,10 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct flow_match_ipv4_addrs match;
flow_rule_match_enc_ipv4_addrs(rule, &match);
ext->src = match.key->src;
ext->dst = match.key->dst;
msk->src = match.mask->src;
msk->dst = match.mask->dst;
ext->src |= match.key->src & match.mask->src;
ext->dst |= match.key->dst & match.mask->dst;
msk->src |= match.mask->src;
msk->dst |= match.mask->dst;
}
}
......@@ -349,12 +397,17 @@ nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
int i;
flow_rule_match_enc_ipv6_addrs(rule, &match);
ext->src = match.key->src;
ext->dst = match.key->dst;
msk->src = match.mask->src;
msk->dst = match.mask->dst;
for (i = 0; i < sizeof(ext->src); i++) {
ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
match.mask->src.s6_addr[i];
ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
match.mask->dst.s6_addr[i];
msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
}
}
}
......@@ -367,10 +420,10 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
ext->tos = match.key->tos;
ext->ttl = match.key->ttl;
msk->tos = match.mask->tos;
msk->ttl = match.mask->ttl;
ext->tos |= match.key->tos & match.mask->tos;
ext->ttl |= match.key->ttl & match.mask->ttl;
msk->tos |= match.mask->tos;
msk->ttl |= match.mask->ttl;
}
}
......@@ -383,10 +436,11 @@ nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
u32 vni;
flow_rule_match_enc_keyid(rule, &match);
vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
*key = cpu_to_be32(vni);
vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
NFP_FL_TUN_VNI_OFFSET;
*key |= cpu_to_be32(vni);
vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
*key_msk = cpu_to_be32(vni);
*key_msk |= cpu_to_be32(vni);
}
}
......@@ -398,22 +452,19 @@ nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
*key = match.key->keyid;
*key_msk = match.mask->keyid;
*key |= match.key->keyid & match.mask->keyid;
*key_msk |= match.mask->keyid;
*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
}
static void
void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
struct nfp_flower_ipv4_gre_tun *msk,
struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
......@@ -424,40 +475,31 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
&ext->tun_flags, &msk->tun_flags, rule);
}
static void
void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
static void
void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
struct nfp_flower_ipv6_udp_tun *msk,
struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
static void
void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
struct nfp_flower_ipv6_gre_tun *msk,
struct flow_rule *rule)
{
memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
......@@ -469,14 +511,13 @@ nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
struct flow_cls_offload *flow,
struct flow_rule *rule,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct nfp_flower_priv *priv = app->priv;
bool qinq_sup;
u32 port_id;
......@@ -527,9 +568,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
(struct nfp_flower_mac_mpls *)msk,
rule, extack);
err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
(struct nfp_flower_mac_mpls *)msk,
rule, extack);
if (err)
return err;
......@@ -640,9 +681,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
err = nfp_flower_compile_geneve_opt(ext, msk, rule);
if (err)
return err;
nfp_flower_compile_geneve_opt(ext, msk, rule);
}
}
......
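Throughout the match.c changes above, the compile functions also switch from assigning key/mask fields into a freshly zeroed buffer to OR-ing in key & mask, with the zeroing hoisted out to whoever owns the buffer. As far as the diff shows, the point is that the same ext/msk pair can then be filled incrementally from more than one rule (the pre_ct, post_ct and nft parts of a conntrack flow) without a later layer clobbering an earlier one. A hedged sketch of the idiom, using an invented example_l4 struct:

#include <linux/types.h>

struct example_l4 {
	__be16 port_src;
	__be16 port_dst;
};

/* Merge-friendly compile step: OR in (key & mask) so several rules can
 * contribute bits to the same buffer. Masking the key first keeps the
 * don't-care bits zero, so overlapping layers combine cleanly. The
 * caller zeroes ext and msk exactly once, before the first layer.
 */
static void example_compile_ports(struct example_l4 *ext,
				  struct example_l4 *msk,
				  __be16 key_src, __be16 mask_src)
{
	ext->port_src |= key_src & mask_src;
	msk->port_src |= mask_src;
}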
......@@ -290,8 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
return true;
}
int nfp_compile_flow_metadata(struct nfp_app *app,
struct flow_cls_offload *flow,
int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev,
struct netlink_ext_ack *extack)
......@@ -310,7 +309,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
}
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
nfp_flow->ingress_dev = netdev;
ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
......@@ -357,7 +356,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
if (check_entry) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
if (nfp_release_stats_entry(app, stats_cxt)) {
......
......@@ -41,6 +41,8 @@
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_CT) | \
BIT(FLOW_DISSECTOR_KEY_META) | \
BIT(FLOW_DISSECTOR_KEY_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
......@@ -89,7 +91,7 @@ struct nfp_flower_merge_check {
};
};
static int
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype)
{
......@@ -134,20 +136,16 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
return 0;
}
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
......@@ -236,15 +234,14 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
return 0;
}
static int
int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
struct flow_cls_offload *flow,
struct flow_rule *rule,
enum nfp_flower_tun_type *tun_type,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
......@@ -452,7 +449,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
return -EOPNOTSUPP;
}
} else if (nfp_flower_check_higher_than_mac(flow)) {
} else if (nfp_flower_check_higher_than_mac(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
return -EOPNOTSUPP;
}
......@@ -471,7 +468,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
nfp_flower_check_higher_than_l3(flow)) {
nfp_flower_check_higher_than_l3(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
return -EOPNOTSUPP;
}
......@@ -543,7 +540,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
return 0;
}
static struct nfp_fl_payload *
struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
......@@ -1005,9 +1002,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
struct flow_cls_offload merge_tc_off;
struct nfp_flower_priv *priv = app->priv;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
struct nfp_merge_info *merge_info;
......@@ -1016,7 +1011,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
ASSERT_RTNL();
extack = merge_tc_off.common.extack;
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
......@@ -1061,9 +1055,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
if (err)
goto err_unlink_sub_flow1;
merge_tc_off.cookie = merge_flow->tc_flower_cookie;
err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
merge_flow->ingress_dev, extack);
err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
merge_flow->ingress_dev, NULL);
if (err)
goto err_unlink_sub_flow2;
......@@ -1305,6 +1298,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct netlink_ext_ack *extack = NULL;
......@@ -1330,7 +1324,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
&tun_type, extack);
if (err)
goto err_free_key_ls;
......@@ -1341,12 +1335,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
flow_pay, tun_type, extack);
if (err)
goto err_destroy_flow;
err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
if (err)
goto err_destroy_flow;
......@@ -1356,7 +1350,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_destroy_flow;
}
err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
......@@ -1476,7 +1470,7 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
kfree_rcu(merge_flow, rcu);
}
static void
void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
......@@ -1601,7 +1595,7 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
}
}
static void
void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
......@@ -1628,10 +1622,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
/* Check ct_map table first */
ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
nfp_ct_map_params);
if (ct_map_ent)
return nfp_fl_ct_stats(flow, ct_map_ent);
extack = flow->common.extack;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
......
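Finally, nfp_flower_get_stats() above now dispatches on ownership: the flow cookie is first looked up in the ct_map_table rhashtable and, on a hit, handed to nfp_fl_ct_stats(); only a miss falls through to the regular flower table. A sketch of that cookie-keyed lookup, with a hypothetical example_ct_map_entry standing in for the real nfp_fl_ct_map_entry:

#include <linux/rhashtable.h>
#include <linux/stddef.h>

struct example_ct_map_entry {
	unsigned long cookie;		/* TC flower cookie, the hash key */
	struct rhash_head hash_node;
};

static const struct rhashtable_params example_map_params = {
	.key_len	= sizeof(unsigned long),
	.key_offset	= offsetof(struct example_ct_map_entry, cookie),
	.head_offset	= offsetof(struct example_ct_map_entry, hash_node),
	.automatic_shrinking = true,
};

/* Returns the ct map entry owning @cookie, or NULL if the flow is not
 * conntrack-managed and should take the normal flower stats path.
 */
static struct example_ct_map_entry *
example_ct_lookup(struct rhashtable *ct_map_table, unsigned long cookie)
{
	return rhashtable_lookup_fast(ct_map_table, &cookie, example_map_params);
}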