From d708f902989b844907c5f7720abe67812a256c33 Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Thu, 5 Apr 2018 15:42:06 +0300
Subject: [PATCH] net/mlx5e: Get the required HW match level while parsing TC
 flow matches

Introduce levels of matching on headers of offloaded flows (none, L2,
L3, L4) that follow the inline mode levels. This is a pre-step for
offloading flows without any matches on headers.

Signed-off-by: Or Gerlitz
Reviewed-by: Roi Dayan
Signed-off-by: Saeed Mahameed
---
 .../net/ethernet/mellanox/mlx5/core/en_tc.c   | 34 +++++++++----------
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  7 ++++
 2 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 78f5c47affb4..e84bcea8b071 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1192,7 +1192,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
 			      struct tc_cls_flower_offload *f,
-			      u8 *min_inline)
+			      u8 *match_level)
 {
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				       outer_headers);
@@ -1201,7 +1201,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
 
-	*min_inline = MLX5_INLINE_MODE_NONE;
+	*match_level = MLX5_MATCH_NONE;
 
 	if (f->dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -1276,7 +1276,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				key->src);
 		if (!is_zero_ether_addr(mask->src) ||
 		    !is_zero_ether_addr(mask->dst))
-			*min_inline = MLX5_INLINE_MODE_L2;
+			*match_level = MLX5_MATCH_L2;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -1298,7 +1298,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
 
-			*min_inline = MLX5_INLINE_MODE_L2;
+			*match_level = MLX5_MATCH_L2;
 		}
 	}
 
@@ -1317,7 +1317,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			 ntohs(key->n_proto));
 
 		if (mask->n_proto)
-			*min_inline = MLX5_INLINE_MODE_L2;
+			*match_level = MLX5_MATCH_L2;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -1343,10 +1343,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 			/* the HW doesn't need L3 inline to match on frag=no */
 			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
-				*min_inline = MLX5_INLINE_MODE_L2;
+				*match_level = MLX5_INLINE_MODE_L2;
 	/* *** L2 attributes parsing up to here *** */
 			else
-				*min_inline = MLX5_INLINE_MODE_IP;
+				*match_level = MLX5_INLINE_MODE_IP;
 		}
 	}
 
@@ -1367,7 +1367,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			 key->ip_proto);
 
 		if (mask->ip_proto)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -1394,7 +1394,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		       &key->dst, sizeof(key->dst));
 
 		if (mask->src || mask->dst)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -1423,7 +1423,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
 		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
@@ -1451,7 +1451,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			return -EOPNOTSUPP;
 
 		if (mask->tos || mask->ttl)
-			*min_inline = MLX5_INLINE_MODE_IP;
+			*match_level = MLX5_MATCH_L3;
 	}
 
 	/* *** L3 attributes parsing up to here *** */
@@ -1496,7 +1496,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		}
 
 		if (mask->src || mask->dst)
-			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
+			*match_level = MLX5_MATCH_L4;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
@@ -1515,7 +1515,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			 ntohs(key->flags));
 
 		if (mask->flags)
-			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
+			*match_level = MLX5_MATCH_L4;
 	}
 
 	return 0;
@@ -1530,19 +1530,19 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep;
-	u8 min_inline;
+	u8 match_level;
 	int err;
 
-	err = __parse_cls_flower(priv, spec, f, &min_inline);
+	err = __parse_cls_flower(priv, spec, f, &match_level);
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
 		rep = rpriv->rep;
 		if (rep->vport != FDB_UPLINK_VPORT &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
-		    esw->offloads.inline_mode < min_inline)) {
+		    esw->offloads.inline_mode < match_level)) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    min_inline, esw->offloads.inline_mode);
+				    match_level, esw->offloads.inline_mode);
 			return -EOPNOTSUPP;
 		}
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 4cd773fa55e3..efae77dd1e35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -227,6 +227,13 @@ enum {
 	SET_VLAN_INSERT	= BIT(1)
 };
 
+enum mlx5_flow_match_level {
+	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
+	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
+	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
+	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
+};
+
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
 	struct mlx5_eswitch_rep *out_rep;
-- 
GitLab
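
The new enum deliberately aliases the match levels to the existing inline-mode values, so parse_cls_flower() can keep comparing esw->offloads.inline_mode directly against the level the parser reports. The stand-alone sketch below illustrates that comparison; it is a hypothetical user-space harness (the enum values mirror the patch, while the harness itself and the can_offload() helper name are illustrative, not kernel code).

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the MLX5_INLINE_MODE_* ordering used by the eswitch. */
enum inline_mode {
	INLINE_MODE_NONE,
	INLINE_MODE_L2,
	INLINE_MODE_IP,
	INLINE_MODE_TCP_UDP,
};

/* Mirrors the new mlx5_flow_match_level: each level aliases an inline mode. */
enum match_level {
	MATCH_NONE = INLINE_MODE_NONE,
	MATCH_L2   = INLINE_MODE_L2,
	MATCH_L3   = INLINE_MODE_IP,
	MATCH_L4   = INLINE_MODE_TCP_UDP,
};

/* The decision parse_cls_flower() makes for a non-uplink vport rep:
 * offload is allowed when the inline mode is unrestricted (NONE) or
 * at least as deep as the level the flow actually matches on. */
static bool can_offload(enum inline_mode mode, enum match_level level)
{
	return mode == INLINE_MODE_NONE || (int)mode >= (int)level;
}

int main(void)
{
	/* An L2-only match offloads under L2 inline mode... */
	printf("L2 match under L2 inline: %d\n",
	       can_offload(INLINE_MODE_L2, MATCH_L2));
	/* ...but a flow matching on L4 ports is rejected, which is the
	 * netdev_warn() path in the patch. */
	printf("L4 match under L2 inline: %d\n",
	       can_offload(INLINE_MODE_L2, MATCH_L4));
	return 0;
}

Because MLX5_MATCH_NONE is the lowest value, a flow that matches no headers at all passes this comparison under any configured inline mode, which is what makes the change a pre-step for offloading such flows.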