// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

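/* Flow dissector match keys the firmware can handle; a flow using any
 * key outside this set is rejected rather than partially offloaded.
 */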
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

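/* Tunnel match keys that may appear in an offloaded flow; the _R set
 * below is the minimum that must be present once any tunnel key is
 * used.
 */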
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

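/* Build a FLOW_ADD/FLOW_DEL control message from the flow payload and
 * send it to the firmware over the control channel.
 */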
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

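	/* Pack metadata, key, mask and action data back to back; the
	 * firmware expects them in one contiguous message.
	 */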
	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

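/* Return true if the flow matches on any field above the MAC layer
 * (L3 addresses, L4 ports or ICMP); used to reject flows with
 * unsupported ethertypes that also match beyond layer 2.
 */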
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

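/* Account for Geneve option matches in the key layout, rejecting
 * option data longer than the firmware can store.
 */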
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

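/* Walk the flower match and work out which firmware key layers, and
 * what total key size, the flow requires, rejecting any match the
 * hardware cannot support.
 */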
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);

		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

/* Allocate a flow payload with key and mask buffers sized from the
 * calculated key layout, plus a maximum-size action buffer.
 */
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	flow_pay->ingress_offload = !egress;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	port->tc_offload_cnt++;

	/* The flow payload itself is only deallocated when the flower rule
	 * is destroyed; the temporary key layout is no longer needed.
	 */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	u32 ctx_id;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	if (nfp_flow->ingress_offload && egress)
		return 0;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
			      priv->stats[ctx_id].pkts,
			      priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

/* Dispatch a TC flower classifier command to the matching add, delete
 * or stats handler for this repr.
 */
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower, egress);
	default:
		return -EOPNOTSUPP;
	}
}

/* TC callback for flows whose egress device is this repr; only tunnel
 * matches are accepted here (egress = true).
 */
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

/* TC block callback for the repr's ingress block; matches are
 * offloaded with egress = false.
 */
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

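/* Bind or unbind the repr's ingress TC block to the flower offload
 * callback.
 */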
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

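/* Main TC offload entry point for flower app netdevs; wired up as the
 * .setup_tc callback of the flower app's struct nfp_app_type (see
 * flower/main.c).
 */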
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}