/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

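/* Map each supported TC pedit (header rewrite) field to the rewrite
 * field (size and offset) it occupies inside struct
 * ch_filter_specification.
 */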
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

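/* Allocate a flower entry and initialize its per-entry stats lock. */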
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

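/* Translate the TC flower match (basic, L3/L4 addresses and ports,
 * tunnel VNI, VLAN, IP TOS) into a Chelsio hardware filter
 * specification.
 */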
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = be16_to_cpu(match.key->dst);
		fs->nat_fport = be16_to_cpu(match.key->src);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
					       VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						     VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets. Also, when a VLAN tag is present in a
		 * packet, the ethtype match is applied to the ethertype of
		 * the inner header, i.e. the header following the VLAN
		 * header. So, set ivlan_vld based on the ethtype info
		 * supplied by TC for VLAN packets if it is 802.1Q, and then
		 * reset the ethtype value; otherwise, the hardware would
		 * try to match the supplied ethtype against the ethertype
		 * of the inner header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

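/* Reject matches on dissector keys the hardware cannot offload. */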
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

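/* Write a pedit value into the filter specification, at the offset
 * and size registered for @field in the pedits[] table above.
 */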
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

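/* Apply one 32-bit pedit (mangle) word to the filter specification
 * and flag the resulting header rewrite (MAC rewrite or NAT).
 */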
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

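/* Translate TC actions (pass/drop, redirect, VLAN ops, pedit) into
 * the filter specification's action fields.
 */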
void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		default:
			break;
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16 bits (SPORT) OR the lower 16 bits
	 * (DPORT) can be set, but NOT BOTH: e.g. 0xffff0000 and
	 * 0x0000ffff are valid masks, 0xffffffff is not.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

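/* Check that a single pedit action only touches fields the hardware
 * can rewrite.
 */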
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

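/* Validate the whole action list; pedit and VLAN rewrites are only
 * supported in combination with an egress redirect.
 */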
int cxgb4_validate_flow_actions(struct net_device *dev,
				struct flow_action *actions,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	if (!flow_action_basic_hw_stats_check(actions, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If the interface doesn't belong to our hardware,
			 * then the provided output port is not valid.
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

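/* The two helpers below track the maximum TC priority currently in
 * use by filters inserted in the HASH region.
 */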
static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	spin_lock_bh(&adap->tids.ftid_lock);
	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
		adap->tids.tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&adap->tids.ftid_lock);
}

static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);
	/* Bail if the current rule is not the one with the max
	 * prio.
	 */
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

	/* Search for the next rule having the same or next lower
	 * max prio.
	 */
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

				/* Bail if we found another rule
				 * having the same prio as the
				 * current max one.
				 */
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (fe == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

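/* Offload a new TC flower rule: validate the match and actions, pick
 * a free filter slot (LE-TCAM or HASH region), create the hardware
 * filter and track the rule by its TC cookie. Illustrative only, a
 * rule of roughly this shape ends up here:
 *   tc filter add dev <port> ingress protocol ip flower skip_sw \
 *       dst_ip 10.0.0.1 ip_proto tcp dst_port 80 action drop
 */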
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID where we can insert this new
	 * rule. Only insert the rule if its prio doesn't conflict
	 * with existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		ret = -ENOMEM;
		goto free_entry;
	}

	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

	/* If the rule can be inserted into the HASH region, then
	 * ignore the index into the normal FILTER region.
	 */
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio);

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

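/* Remove an offloaded rule: delete the hardware filter and drop the
 * entry from the cookie hashtable.
 */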
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	u32 tc_prio;
	bool hash;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	hash = ch_flower->fs.hash;
	tc_prio = ch_flower->fs.tc_prio;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

err:
	return ret;
}

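/* Periodic work: walk all offloaded rules and refresh their last-used
 * timestamps from the hardware packet counters.
 */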
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

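/* Timer callback: defer stats collection to process context. */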
static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

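/* Report hardware byte/packet counters for a rule back to TC. */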
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

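/* Hashtable of offloaded rules, keyed by the TC flower cookie. */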
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}