/* cxgb4_tc_flower.c */
/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
36 37
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
38
#include <net/tc_act/tc_vlan.h>
39 40

#include "cxgb4.h"
41
#include "cxgb4_filter.h"
42 43
#include "cxgb4_tc_flower.h"

44 45
#define STATS_CHECK_PERIOD (HZ / 2)

/* Table mapping each supported pedit (packet-edit) field to the byte
 * offset and size of its rewrite value inside struct
 * ch_filter_specification.  offload_pedit() scans this table to find
 * where a mangled value for a given field must be stored.
 */
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};

63 64 65
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
66 67
	if (new)
		spin_lock_init(&new->lock);
68 69 70 71 72 73 74
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	/* The TC flower cookie is the rhashtable key; see
	 * cxgb4_tc_flower_ht_params (key_offset = tc_flower_cookie).
	 */
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

/* Translate a TC flower match (struct flow_rule) into a Chelsio
 * hardware filter specification in @fs.  Assumes the rule has already
 * passed cxgb4_validate_flow_match().
 */
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_rule *rule,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	/* Determine the L3 address family, either from the CONTROL key
	 * or directly from which address key is present.
	 */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		/* ETH_P_ALL is a wildcard; encode it as zero value/mask. */
		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		/* fs->type selects IPv6 (1) vs IPv4 (0) filter tuples. */
		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		/* A non-zero VNI mask means tunnel matching is requested,
		 * so mark the encapsulation-valid bit.
		 */
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
					       VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						     VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use ivlan_vld bit to match vlan packets
		 * as 802.1Q. Also, when vlan tag is present in packets,
		 * ethtype match is used then to match on ethtype of inner
		 * header ie. the header following the vlan header.
		 * So, set the ivlan_vld based on ethtype info supplied by
		 * TC for vlan packets if its 802.1Q. And then reset the
		 * ethtype value else, hw will try to match the supplied
		 * ethtype value with ethtype of inner header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

/* Reject flower matches the hardware cannot offload.
 *
 * Returns 0 when every dissector key used by @rule is supported,
 * -EOPNOTSUPP for unsupported keys or a TTL match, and -EINVAL when
 * an IP key is used without an IPv4/IPv6 ethtype.
 */
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_rule *rule)
{
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	/* Any key bit outside this whitelist cannot be offloaded. */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		/* IP header fields (tos/ttl) only make sense when the
		 * rule is pinned to IPv4 or IPv6 traffic.
		 */
		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

273 274 275 276
/* Store the rewrite value for a pedit @field into the filter spec.
 *
 * @val/@mask follow the TC pedit convention: the bits to write are
 * those where @mask is clear, hence set_val = val & ~mask.  The
 * destination offset and size come from the pedits[] table; the value
 * is copied raw (byte-wise) into the corresponding fs member.
 */
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	/* NOTE(review): if @field is absent from pedits[], this writes
	 * one byte at offset 0 of fs — callers must only pass fields
	 * present in the table.
	 */
	memcpy((u8 *)fs + offset, &set_val, size);
}

/* Apply one TC pedit (mangle) action to the filter specification.
 *
 * @htype selects the header being rewritten (ETH/IP4/IP6/TCP/UDP) and
 * @offset identifies the 32-bit word within that header.  IP and L4
 * rewrites additionally enable full NAT mode on the filter.
 */
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			/* This word packs the top 16 bits of the DMAC and
			 * the low 16 bits of the SMAC; the mask tells us
			 * which half is being rewritten.
			 */
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			/* The 32-bit word holds sport and dport; the mask
			 * (validated by valid_l4_mask) selects which half
			 * is rewritten.  NOTE(review): the halves are used
			 * as-is without byte-order conversion here —
			 * presumably handled by the filter layer; confirm
			 * against cxgb4_filter.c.
			 */
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				fs->nat_fport = val;
			else
				fs->nat_lport = val >> 16;
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				fs->nat_fport = val;
			else
				fs->nat_lport = val >> 16;
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

373 374 375
/* Translate the TC action list into filter-spec actions.
 *
 * Assumes the actions already passed cxgb4_validate_flow_actions(),
 * so unsupported action ids are silently ignored here.
 */
void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			/* Switch the packet out of the egress port of the
			 * target interface (same adapter, validated earlier).
			 */
			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		case FLOW_ACTION_QUEUE:
			/* Steer matching packets to a specific rx queue. */
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = act->queue.index;
			break;
		default:
			break;
		}
	}
}

443 444 445 446 447 448 449 450 451 452 453 454 455 456
/* An L4 port pedit may rewrite either the source port (upper 16 bits
 * of the 32-bit sport/dport word) or the destination port (lower 16
 * bits), but never both in a single action.
 */
static bool valid_l4_mask(u32 mask)
{
	u16 sport_bits = mask >> 16;
	u16 dport_bits = mask & 0xFFFF;

	return !(sport_bits && dport_bits);
}

/* Check whether a single pedit (mangle) action touches only header
 * fields the hardware can rewrite.  Returns true if offloadable.
 */
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			/* ~mask holds the bits being rewritten; only one
			 * of sport/dport may be rewritten per action.
			 */
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

544
/* Validate the TC action list before any hardware state is touched.
 *
 * @matchall_filter: non-zero when called for a tc-matchall rule; a
 * MIRRED (egress mirror) action is only accepted in that case.
 *
 * Also enforces that pedit/vlan rewrites are combined with an egress
 * redirect, since the hardware applies rewrites while switching.
 */
int cxgb4_validate_flow_actions(struct net_device *dev,
				struct flow_action *actions,
				struct netlink_ext_ack *extack,
				u8 matchall_filter)
{
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	if (!flow_action_basic_hw_stats_check(actions, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			bool found = false;
			/* NOTE(review): this inner 'i' shadows the
			 * flow_action_for_each index above; harmless since
			 * the outer index is not used in this case body.
			 */
			unsigned int i;

			if (act->id == FLOW_ACTION_MIRRED &&
			    !matchall_filter) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Egress mirror action is only supported for tc-matchall");
				return -EOPNOTSUPP;
			}

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				/* Only plain 802.1Q tags can be inserted
				 * or rewritten.
				 */
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		case FLOW_ACTION_QUEUE:
			/* Do nothing. cxgb4_set_filter will validate */
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705
/* Record @tc_prio as the highest TC priority seen among HASH-region
 * filters; cxgb4_tc_flower_hash_prio_del() later re-derives the max
 * when a rule is removed.
 */
static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;

	spin_lock_bh(&t->ftid_lock);
	if (t->tc_hash_tids_max_prio < tc_prio)
		t->tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&t->ftid_lock);
}

/* A rule with priority @tc_prio is being deleted.  If it was the
 * current maximum, walk all remaining flower entries to find the new
 * maximum HASH-filter priority (0 if none remain).
 */
static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);
	/* Bail if the current rule is not the one with the max
	 * prio.
	 */
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

	/* Search for the next rule having the same or next lower
	 * max prio.
	 */
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

				/* Bail if we found another rule
				 * having the same prio as the
				 * current max one.
				 */
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
		/* ERR_PTR(-EAGAIN) means the table was resized mid-walk;
		 * restart the walk.
		 */
	} while (fe == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

706 707 708
/* Validate, translate and install a flower rule as a hardware filter.
 *
 * On success, stores the allocated filter TID in *@tid and returns 0.
 * Returns -EOPNOTSUPP for unsupported matches/actions, -ENOMEM when no
 * filter index is free, -ETIMEDOUT if the firmware does not reply, or
 * the firmware's error code from filter creation.
 */
int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid)
{
	struct adapter *adap = netdev2adap(dev);
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, rule))
		return -EOPNOTSUPP;

	cxgb4_process_flow_match(dev, rule, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   tc_prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	/* Index falls in the hi-prio filter region: force prio bit and
	 * do not use the HASH region for this rule.
	 */
	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

	/* If the rule can be inserted into HASH region, then ignore
	 * the index to normal FILTER region.
	 */
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = tc_prio;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		return ret;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	/* Check if hw returned error for filter creation */
	if (ctx.result)
		return ctx.result;

	*tid = ctx.tid;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

	return 0;
}

/* TC entry point for installing a flower rule: allocate the tracking
 * entry, program the hardware filter, then index the entry by the TC
 * cookie for later stats/destroy lookups.
 */
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	int ret;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;	/* enable hit counters for stats reporting */
	fs->tc_cookie = cls->cookie;

	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
				      &ch_flower->filter_id);
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	/* Unwind the hardware filter (and max-prio tracking) installed
	 * by cxgb4_flow_rule_replace() above.
	 */
	if (fs->hash)
		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839
/* Remove the hardware filter behind a flower rule and, for HASH-region
 * filters, recompute the tracked maximum TC priority.  Returns the
 * result of the filter deletion.
 */
int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid)
{
	struct adapter *adap = netdev2adap(dev);
	/* Snapshot before cxgb4_del_filter() in case it touches @fs. */
	u8 was_hash = fs->hash;
	int err;

	err = cxgb4_del_filter(dev, tid, fs);
	if (err)
		return err;

	if (was_hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

	return err;
}

840
/* TC entry point for deleting a flower rule: look the entry up by
 * cookie, remove the hardware filter, then drop it from the table and
 * free it after an RCU grace period (lookups run under RCU).
 */
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
				      &ch_flower->fs, ch_flower->filter_id);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

868
/* Periodic stats work: walk every flower entry, read its hardware hit
 * counters, and refresh last_used whenever the packet count changed.
 * Re-arms the stats timer when done.
 */
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				/* Only bump last_used when traffic actually
				 * hit the filter since the previous poll.
				 */
				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

		/* -EAGAIN: table resized during the walk; start over. */
	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

908 909 910 911 912 913 914
/* Stats timer callback: defer the flower-table walk to process context
 * via the stats work item.
 */
static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adapter = from_timer(adapter, t, flower_stats_timer);

	schedule_work(&adapter->flower_stats_work);
}

915
/* TC entry point for rule statistics: read the hardware hit counters
 * for the rule identified by the TC cookie and report the deltas since
 * the last call via flow_stats_update().
 */
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	/* Nothing to report if the counters did not move. */
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count, 0,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}
957

958 959 960 961 962 963 964 965 966 967 968
/* Flower-table parameters: entries are keyed by the TC flower cookie
 * (see ch_flower_lookup()).
 */
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

/* Set up TC flower offload state for an adapter: the cookie-keyed
 * rule table and the periodic stats work/timer.  Returns -EEXIST if
 * already initialized, or the rhashtable_init() error.
 */
int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	/* Kick off the periodic stats polling immediately. */
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

/* Tear down TC flower offload state.  Order matters: stop the timer
 * first (it schedules the work), then flush the work (it re-arms the
 * timer and walks the table), and only then destroy the table.
 */
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}