/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

/* Table mapping each pedit field id to the size (bytes) and offset of
 * the member inside struct ch_filter_specification that backs it.
 * offload_pedit() walks this table to patch rewritten header values
 * into the filter specification.
 */
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
70 71
	if (new)
		spin_lock_init(&new->lock);
72 73 74 75 76 77 78
	return new;
}

/* Look up the flower entry previously inserted for @flower_cookie.
 *
 * Must be called with either RTNL or rcu_read_lock held to keep the
 * returned entry from being freed under us.
 */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

static void cxgb4_process_flow_match(struct net_device *dev,
84
				     struct flow_rule *rule,
85 86
				     struct ch_filter_specification *fs)
{
87 88 89 90 91 92 93
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
94 95 96 97 98 99

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

100 101 102
		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

103 104
		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
105 106
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
107 108
	}

109
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
110 111 112
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
113
		fs->type = 0;
114 115 116 117
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));
118 119

		/* also initialize nat_lip/fip to same values */
120 121
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
122 123
	}

124
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
125
		struct flow_match_ipv6_addrs match;
126

127
		flow_rule_match_ipv6_addrs(rule, &match);
128
		fs->type = 1;
129 130 131 132 133 134 135 136
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));
137 138

		/* also initialize nat_lip/fip to same values */
139 140 141 142
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
143 144
	}

145 146
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;
147

148 149 150 151 152
		flow_rule_match_ports(rule, &match);
		fs->val.lport = cpu_to_be16(match.key->dst);
		fs->mask.lport = cpu_to_be16(match.mask->dst);
		fs->val.fport = cpu_to_be16(match.key->src);
		fs->mask.fport = cpu_to_be16(match.mask->src);
153 154

		/* also initialize nat_lport/fport to same values */
155 156
		fs->nat_lport = cpu_to_be16(match.key->dst);
		fs->nat_fport = cpu_to_be16(match.key->src);
157 158
	}

159 160 161 162 163 164
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
165 166
	}

167 168 169 170 171 172
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
173 174 175 176 177 178
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

179 180
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
181 182
		u16 vlan_tci, vlan_tci_mask;

183 184 185 186 187
		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
					       VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						     VLAN_PRIO_SHIFT);
188 189
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;
190

191 192 193
		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

194 195 196 197 198 199 200 201 202 203 204 205 206
		/* Chelsio adapters use ivlan_vld bit to match vlan packets
		 * as 802.1Q. Also, when vlan tag is present in packets,
		 * ethtype match is used then to match on ethtype of inner
		 * header ie. the header following the vlan header.
		 * So, set the ivlan_vld based on ethtype info supplied by
		 * TC for vlan packets if its 802.1Q. And then reset the
		 * ethtype value else, hw will try to match the supplied
		 * ethtype value with ethtype of inner header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
207 208 209 210 211 212 213 214 215 216
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

static int cxgb4_validate_flow_match(struct net_device *dev,
217
				     struct flow_rule *rule)
218
{
219
	struct flow_dissector *dissector = rule->match.dissector;
220 221 222
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

223
	if (dissector->used_keys &
224 225 226 227
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
228
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
229
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
230
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
231
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
232
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
233
			    dissector->used_keys);
234 235
		return -EOPNOTSUPP;
	}
236

237 238 239 240 241 242
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
243 244
	}

245
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
246
		u16 eth_ip_type = ethtype_key & ethtype_mask;
247
		struct flow_match_ip match;
248 249 250 251 252 253

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

254 255
		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
256 257 258 259 260
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

261 262 263
	return 0;
}

264 265 266 267
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
268 269
	u32 offset = 0;
	u8 size = 1;
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

/* Apply one 32-bit pedit (header rewrite) word to the filter spec.
 *
 * @val: new header bits for this word
 * @mask: bits NOT being rewritten are set (offload_pedit() stores
 *        val & ~mask)
 * @offset: which 32-bit word of the header the mangle action edits
 * @htype: header type (eth/ip4/ip6/tcp/udp) of the mangle action
 *
 * Any L3/L4 rewrite switches the filter into NAT_MODE_ALL.
 */
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			/* This word straddles DMAC[47:32] and SMAC[15:0];
			 * the mask tells us which half is being rewritten.
			 */
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			/* NOTE(review): cpu_to_be32() applied to values
			 * from the TC mangle action looks like an
			 * endianness mix-up on little-endian hosts —
			 * verify against the flow_action mangle layout.
			 */
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

370 371 372
void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
373
{
374
	struct flow_action_entry *act;
375
	int i;
376

377
	flow_action_for_each(i, act, actions) {
378 379
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
380
			fs->action = FILTER_PASS;
381 382
			break;
		case FLOW_ACTION_DROP:
383
			fs->action = FILTER_DROP;
384 385 386
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
387 388 389 390
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
391 392 393 394 395 396 397
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
398
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
399 400
			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
401 402
				fs->newvlan |= VLAN_REMOVE;
				break;
403
			case FLOW_ACTION_VLAN_PUSH:
404 405 406
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
407
			case FLOW_ACTION_VLAN_MANGLE:
408 409 410 411 412 413
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
414 415 416
			}
			break;
		case FLOW_ACTION_MANGLE: {
417 418 419
			u32 mask, val, offset;
			u8 htype;

420 421 422 423
			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;
424

425
			process_pedit_field(fs, val, mask, offset, htype);
426
			}
427 428 429
			break;
		default:
			break;
430 431 432 433
		}
	}
}

434 435 436 437 438 439 440 441 442 443 444 445 446 447
static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

/* Check that a single TC mangle (pedit) action touches only header
 * words the hardware can rewrite.  For the L4 port words it also
 * enforces that one pedit rewrites either the source OR destination
 * port, never both.  Returns true when the action is offloadable.
 */
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			/* ~mask has the rewritten bits set (the mangle
			 * mask keeps preserved bits set).
			 */
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

535
int cxgb4_validate_flow_actions(struct net_device *dev,
536 537
				struct flow_action *actions,
				struct netlink_ext_ack *extack)
538
{
539
	struct flow_action_entry *act;
540 541 542
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
543
	int i;
544

545
	if (!flow_action_basic_hw_stats_check(actions, extack))
546 547
		return -EOPNOTSUPP;

548
	flow_action_for_each(i, act, actions) {
549 550 551
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
552
			/* Do nothing */
553 554
			break;
		case FLOW_ACTION_REDIRECT: {
555
			struct adapter *adap = netdev2adap(dev);
556 557
			struct net_device *n_dev, *target_dev;
			unsigned int i;
558 559
			bool found = false;

560
			target_dev = act->dev;
561 562
			for_each_port(adap, i) {
				n_dev = adap->port[i];
563
				if (target_dev == n_dev) {
564 565 566 567 568 569 570 571 572 573 574 575 576
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
577
			act_redir = true;
578 579 580 581 582 583
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
584

585 586
			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
587
				break;
588 589
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
590 591 592 593 594 595 596 597 598 599 600
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
601
			act_vlan = true;
602 603 604 605
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);
606

607 608
			if (!pedit_valid)
				return -EOPNOTSUPP;
609
			act_pedit = true;
610 611 612
			}
			break;
		default:
613 614 615 616
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}
617 618 619 620 621 622 623

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

624 625 626
	return 0;
}

627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684
static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	spin_lock_bh(&adap->tids.ftid_lock);
	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
		adap->tids.tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&adap->tids.ftid_lock);
}

/* Recompute tids.tc_hash_tids_max_prio after the hash rule with prio
 * @tc_prio is removed: walk the flower table for a remaining hash rule
 * with the same or next lower prio; if none exists, reset the max to 0.
 */
static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);
	/* Bail if the current rule is not the one with the max
	 * prio.
	 */
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

	/* Search for the next rule having the same or next lower
	 * max prio.
	 */
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

				/* Bail if we found another rule
				 * having the same prio as the
				 * current max one.
				 */
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (fe == ERR_PTR(-EAGAIN));	/* restart walk if table resized */
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

/* Validate, translate and program one flow rule into the hardware.
 *
 * On success, *@tid holds the filter TID assigned by hardware and
 * @fs has been filled in (including fs->hash and fs->tc_prio).
 * Returns 0 on success or a negative errno.
 */
int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid)
{
	struct adapter *adap = netdev2adap(dev);
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, rule))
		return -EOPNOTSUPP;

	cxgb4_process_flow_match(dev, rule, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	/* Exact-match rules may go into the HASH filter region. */
	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   tc_prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	/* Index landed in the hi-prio (nhpftids) region: force prio
	 * filter semantics and disable hash insertion for it.
	 */
	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

	/* If the rule can be inserted into HASH region, then ignore
	 * the index to normal FILTER region.
	 */
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = tc_prio;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		return ret;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	/* Check if hw returned error for filter creation */
	if (ctx.result)
		return ctx.result;

	*tid = ctx.tid;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

	return 0;
}

/* Offload a new TC flower rule: allocate a driver entry, program the
 * filter via cxgb4_flow_rule_replace(), then index the entry by its TC
 * cookie so later stats/destroy requests can find it.
 */
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	int ret;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;	/* request per-filter hit counters (stats poll) */
	fs->tc_cookie = cls->cookie;

	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
				      &ch_flower->filter_id);
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	/* Undo the hash-prio bookkeeping done by the successful insert. */
	if (fs->hash)
		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818
int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid)
{
	struct adapter *adap = netdev2adap(dev);
	u8 hash;
	int ret;

	hash = fs->hash;

	ret = cxgb4_del_filter(dev, tid, fs);
	if (ret)
		return ret;

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

	return ret;
}

819
int cxgb4_tc_flower_destroy(struct net_device *dev,
820
			    struct flow_cls_offload *cls)
821
{
822 823 824 825 826 827 828 829
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

830 831
	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
				      &ch_flower->fs, ch_flower->filter_id);
832 833 834
	if (ret)
		goto err;

835 836 837 838 839 840
	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
841 842 843 844
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
845 846
}

/* Work-queue body of the periodic stats poll: walk every offloaded
 * flower rule, read its hardware packet counter and refresh last_used
 * when traffic was seen, then re-arm the poll timer.
 */
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				/* Only refresh last_used when the packet
				 * count actually advanced.
				 */
				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));	/* restart on table resize */
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

887 888 889 890 891 892 893
static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

/* Report byte/packet statistics for an offloaded rule back to TC.
 *
 * Reads the hardware counters and pushes only the delta since the last
 * report via flow_stats_update(), then remembers the new totals.
 */
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count, 0,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}
936

937 938 939 940 941 942 943 944 945 946 947
/* rhashtable parameters for the per-adapter flower table: one
 * ch_tc_flower_entry per offloaded rule, keyed by its TC flower cookie.
 */
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
948
{
949 950
	int ret;

951 952 953
	if (adap->tc_flower_initialized)
		return -EEXIST;

954 955 956 957 958 959
	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
960
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
961
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
962
	adap->tc_flower_initialized = true;
963
	return 0;
964 965 966 967
}

/* Tear down TC flower offload state: stop the stats timer/work and
 * destroy the rule hashtable.  Safe to call when never initialized.
 */
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}