// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

/* Matchall action kinds this driver offloads. Selects which member of
 * the union in struct mlxsw_sp_mall_entry is valid.
 */
enum mlxsw_sp_mall_action_type {
	MLXSW_SP_MALL_ACTION_TYPE_MIRROR,	/* FLOW_ACTION_MIRRED, offloaded via SPAN */
	MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,	/* FLOW_ACTION_SAMPLE, offloaded via MPSC reg */
};

/* Per-rule state of an offloaded mirror (mirred) action. */
struct mlxsw_sp_mall_mirror_entry {
	/* Destination netdev packets are mirrored to. */
	const struct net_device *to_dev;
	/* SPAN agent id returned by mlxsw_sp_span_mirror_add(). */
	int span_id;
};

/* One offloaded tc-matchall rule, linked on the flow block's mall_list. */
struct mlxsw_sp_mall_entry {
	struct list_head list;	/* Member of mlxsw_sp_flow_block::mall_list. */
	unsigned long cookie;	/* TC rule cookie; lookup key for destroy. */
	enum mlxsw_sp_mall_action_type type;
	bool ingress;		/* Block was ingress-bound when rule was added. */
	union {
		/* Valid for MLXSW_SP_MALL_ACTION_TYPE_MIRROR. */
		struct mlxsw_sp_mall_mirror_entry mirror;
		/* Valid for MLXSW_SP_MALL_ACTION_TYPE_SAMPLE; published to
		 * mlxsw_sp_port::sample under RCU while the rule is active.
		 */
		struct mlxsw_sp_port_sample sample;
	};
	/* Entry is freed with kfree_rcu() since the sample state may still
	 * be dereferenced by in-flight RX packets.
	 */
	struct rcu_head rcu;
};

static struct mlxsw_sp_mall_entry *
36
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
37 38 39
{
	struct mlxsw_sp_mall_entry *mall_entry;

40
	list_for_each_entry(mall_entry, &block->mall_list, list)
41 42 43 44 45 46 47 48
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}

static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
49
			      struct mlxsw_sp_mall_entry *mall_entry)
50 51 52
{
	enum mlxsw_sp_span_type span_type;

53
	if (!mall_entry->mirror.to_dev) {
54 55 56 57
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

58 59
	span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
					  MLXSW_SP_SPAN_EGRESS;
60 61 62 63
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port,
					mall_entry->mirror.to_dev,
					span_type, true,
					&mall_entry->mirror.span_id);
64 65 66 67
}

static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
68
			      struct mlxsw_sp_mall_entry *mall_entry)
69 70 71
{
	enum mlxsw_sp_span_type span_type;

72 73
	span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
					  MLXSW_SP_SPAN_EGRESS;
74
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mall_entry->mirror.span_id,
75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
				 span_type, true);
}

/* Enable or disable HW packet sampling on @mlxsw_sp_port via the MPSC
 * register. @rate is the 1-in-N sampling ratio (ignored by HW when
 * disabling).
 */
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool enable, u32 rate)
{
	char pl[MLXSW_REG_MPSC_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_mpsc_pack(pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), pl);
}

static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
90
			      struct mlxsw_sp_mall_entry *mall_entry)
91 92 93
{
	int err;

94
	if (rtnl_dereference(mlxsw_sp_port->sample)) {
95 96 97
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
98
	rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
99 100

	err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
101
					    mall_entry->sample.rate);
102 103 104 105 106
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
107
	RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
108 109 110 111 112 113 114 115 116 117
	return err;
}

static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
118
	RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
119 120
}

121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}

/* Remove @mall_entry from a single port, dispatching on the action type. */
static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	if (mall_entry->type == MLXSW_SP_MALL_ACTION_TYPE_MIRROR)
		mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
	else if (mall_entry->type == MLXSW_SP_MALL_ACTION_TYPE_SAMPLE)
		mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
	else
		WARN_ON(1);
}

/* Offload a tc-matchall rule onto @block: validate the single action,
 * build a mall entry, install it on every currently bound port, then
 * account it on the block. Returns 0 or a negative errno; on failure
 * ports already programmed are rolled back.
 */
int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
			  struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	/* Per-port mirror/sample state only supports one action per rule. */
	if (!flow_offload_has_one_action(&f->rule->action)) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}

	/* A block bound to both ingress and egress would make the single
	 * 'ingress' flag on the entry ambiguous.
	 */
	if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
		NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		/* HW sampling ratio is limited by the MPSC register field. */
		if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
			NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
			err = -EOPNOTSUPP;
			goto errout;
		}
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
		mall_entry->sample.psample_group = act->sample.psample_group;
		mall_entry->sample.truncate = act->sample.truncate;
		mall_entry->sample.trunc_size = act->sample.trunc_size;
		mall_entry->sample.rate = act->sample.rate;
	} else {
		err = -EOPNOTSUPP;
		goto errout;
	}

	/* Program the rule on every port the block is currently bound to. */
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
						  mall_entry);
		if (err)
			goto rollback;
	}

	/* An ingress rule blocks the block from being bound to egress and
	 * vice versa, hence the crossed blocker counters.
	 */
	block->rule_count++;
	if (mall_entry->ingress)
		block->egress_blocker_rule_count++;
	else
		block->ingress_blocker_rule_count++;
	list_add_tail(&mall_entry->list, &block->mall_list);
	return 0;

rollback:
	/* Undo the ports programmed before the failing binding. */
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
	kfree(mall_entry);
	return err;
}

/* Remove the matchall rule identified by f->cookie from @block: unlink
 * it, adjust the block accounting, uninstall it from every bound port
 * and free the entry after an RCU grace period.
 */
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
			   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
	if (!mall_entry) {
		NL_SET_ERR_MSG(f->common.extack, "Entry not found");
		return;
	}

	/* Mirror image of the bookkeeping done in mlxsw_sp_mall_replace(). */
	list_del(&mall_entry->list);
	if (mall_entry->ingress)
		block->egress_blocker_rule_count--;
	else
		block->ingress_blocker_rule_count--;
	block->rule_count--;
	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
	kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
}
250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278

int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &block->mall_list, list) {
		err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(mall_entry, &block->mall_list,
					     list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
	return err;
}

/* Remove all matchall rules of @block from a port being unbound from it. */
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *entry;

	list_for_each_entry(entry, &block->mall_list, list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, entry);
}