// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

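/* Send one flow rule to the firmware as a control message. The rule
 * metadata, unmasked key, mask and action data are packed back to back;
 * length fields are converted to long words for the wire format and
 * restored to bytes before returning.
 */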
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

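/* Return true if the rule matches on any field above the MAC layer
 * (IPv4/IPv6 addresses, L4 ports or ICMP).
 */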
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

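/* Account for geneve option matches in the key layout; option lengths
 * beyond what the firmware supports are rejected.
 */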
static int
nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->key->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

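/* Walk the TC flower match and work out which flower key layers are
 * required, the resulting key size and, for encapsulated matches, the
 * tunnel type. Matches the firmware cannot express return -EOPNOTSUPP.
 */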
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff ||
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		flow_rule_match_enc_ports(rule, &enc_ports);
		if (enc_ports.mask->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		switch (enc_ports.key->dst) {
		case htons(IANA_VXLAN_UDP_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op.key)
				return -EOPNOTSUPP;
			break;
		case htons(GENEVE_UDP_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op.key)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (basic.mask && basic.mask->ip_proto) {
		/* IP protocol is present in the key. */
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key)
			return -EOPNOTSUPP;

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

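/* Allocate a flow payload sized for the calculated key layers, with
 * separate buffers for the unmasked key, the mask and the actions.
 */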
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

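/* Accumulate into @merge the match fields that the actions of @flow
 * rewrite before recirculation, and report the number of output actions
 * and the id of the last action in the list.
 */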
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

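/* Copy the mask data of @flow into @merge, laid out per key layer. Key
 * layers outside NFP_FLOWER_MERGE_FIELDS are only accepted when
 * @extra_fields is set.
 */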
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

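/* Copy any leading pre-actions (pre-tunnel, pre-LAG) from @act_src to
 * @act_dst and return the number of bytes copied. *tunnel_act is set if
 * a pre-tunnel action is found.
 */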
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
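			/* fall through */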
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

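/* After a tunnel push only output actions may follow; check that the
 * remaining action list consists solely of outputs.
 */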
static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];
		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	return 0;
}

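/* Build the merge flow action list: pre-actions first, then the
 * non-output actions of sub_flow1 followed by all actions of sub_flow2.
 */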
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, sub_flow 2 can only have output actions for a valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
		if (err)
			return err;
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to a single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) into a single flow, removing the initial
 * flow from hw and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct tc_cls_flower_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

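/* Tear down a merge flow because one of its subflows is being deleted.
 * If the subflow that provided the match is still present, revert the
 * hardware entry to that subflow with a flow mod; otherwise delete the
 * merge flow from hardware.
 */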
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add the rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

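/* Distribute stats reported by firmware for a merge flow to each of its
 * subflows, since TC collects stats per subflow cookie.
 */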
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will then be collected by TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used, used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -EINVAL;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

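/* Per-netdev state for indirectly offloaded (non-repr) blocks such as
 * tunnel devices, kept on the flower app's indr_block_cb_priv list and
 * looked up under RTNL.
 */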
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}