// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

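/* TCP flag bits the firmware is expected to be able to match on. Note
 * that PSH and URG are only accepted together with FIN, SYN or RST;
 * this is enforced in nfp_flower_calculate_key_layers().
 */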
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

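/* Fragmentation control flags that can be expressed in an offloaded key. */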
#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

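/* All flower dissector keys the driver understands; a rule using any key
 * outside this set is rejected with -EOPNOTSUPP.
 */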
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

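/* Tunnel match keys: a rule that uses any key from the set below must
 * also supply at least the required subset (_R) that follows it.
 */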
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

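/* Build and send a control message carrying the flow to the firmware.
 * The message packs the rule metadata, unmasked key, mask and actions
 * back to back; metadata lengths are converted to long words for the
 * wire format and restored to bytes afterwards.
 */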
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
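	/* For example, assuming NFP_FL_LW_SIZ is 2, a 40 byte key is
	 * reported to the firmware as 40 >> 2 = 10 long words.
	 */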
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

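/* Report whether the rule also matches on fields above the MAC layer.
 * Used to refuse offload when an unknown ethertype is combined with
 * L3/L4 matches the hardware could not apply.
 */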
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

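/* Account for Geneve option matches in the key layout; option data
 * longer than NFP_FL_MAX_GENEVE_OPT_KEY cannot be offloaded.
 */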
static int
nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->key->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

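/* Walk the flower match and compute which NFP key layers are required,
 * their combined size and any tunnel type implied by the match. Results
 * are returned through @ret_key_ls and @tun_type.
 */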
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff ||
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		flow_rule_match_enc_ports(rule, &enc_ports);
		if (enc_ports.mask->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		switch (enc_ports.key->dst) {
		case htons(IANA_VXLAN_UDP_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op.key)
				return -EOPNOTSUPP;
			break;
		case htons(GENEVE_UDP_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op.key)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (basic.mask && basic.mask->ip_proto) {
		/* IP protocol is present in the key. */
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key)
			return -EOPNOTSUPP;

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

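/* Allocate a flow payload sized for the computed key layers; the unmasked
 * key and the mask share the same length, actions get the maximum size.
 */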
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	/* The flow payload itself is kept until the flower rule is
	 * destroyed; only the temporary key layer info is freed here.
	 */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
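	/* The flow is freed whether or not the delete message was sent
	 * successfully; any error from the xmit is still returned.
	 */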
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -EINVAL;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

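	/* Zero the counters once reported so each read returns only the
	 * delta accumulated since the previous read.
	 */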
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
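	/* Only protocol values that are true ethertypes (at least
	 * ETH_P_802_3_MIN) are considered for offload.
	 */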
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

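/* Per-netdev context for indirect block offload (e.g. tunnel devices),
 * tracked on a list in the flower app private data.
 */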
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

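/* Netdevice notifier hook: registers/unregisters the indirect TC block
 * callback as candidate offload netdevs come and go.
 */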
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}