/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

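/*
 * The matchall classifier matches every packet and simply runs the actions
 * attached to it; the interesting work happens in the action list and in
 * the optional hardware offload.  An illustrative user-space setup (exact
 * syntax depends on the iproute2 version) might look like:
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: matchall action drop
 */
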
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

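/* matchall keeps at most one filter per tcf_proto instance, so this head
 * structure doubles as the filter itself.
 */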
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	struct rcu_head	rcu;
};

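/* Every packet matches.  If the filter was installed with skip_sw there is
 * nothing to do in software; otherwise execute the attached actions.
 */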
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (tc_skip_sw(head->flags))
		return -1;

	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

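/* RCU callback: release the actions and free the head once all readers
 * have left their read-side critical sections.
 */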
static void mall_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
						  rcu);

	tcf_exts_destroy(&head->exts);
	kfree(head);
}

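/* Offer the rule to the device through ndo_setup_tc(TC_SETUP_CLSMATCHALL).
 * On success the filter is marked as resident in hardware.
 */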
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_matchall_offload cls_mall = {};
	int err;

	tc_cls_common_offload_init(&cls_mall.common, tp);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.exts = &head->exts;
	cls_mall.cookie = cookie;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL,
					    &cls_mall);
	if (!err)
		head->flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

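/* Remove the hardware rule.  The return value of ndo_setup_tc() is ignored
 * here since the software state is torn down regardless.
 */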
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_matchall_offload cls_mall = {};

	tc_cls_common_offload_init(&cls_mall.common, tp);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &cls_mall);
}

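/* Tear down the single filter: drop the hardware rule if the device could
 * have offloaded it, then free the head after an RCU grace period.
 */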
static void mall_destroy(struct tcf_proto *tp)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;

	if (!head)
		return;

	if (tc_should_offload(dev, head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head);

	call_rcu(&head->rcu, mall_destroy_rcu);
}

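/* Lookup by handle is not implemented; mall_get() always reports that no
 * filter was found.
 */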
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

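/* Netlink policy for the TCA_MATCHALL_* attributes parsed in ->change(). */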
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

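/* Validate and install the action list, and bind the optional classid to
 * the class hierarchy.
 */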
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

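/* Create the single matchall filter.  A second request is rejected with
 * -EEXIST.  Depending on the skip_sw/skip_hw flags, the rule may also be
 * handed to the device for hardware offload; an offload failure is fatal
 * only when skip_sw was requested.
 */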
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
			       mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto err_set_parms;

	if (tc_should_offload(dev, flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
		if (err) {
			if (tc_skip_sw(flags))
				goto err_replace_hw_filter;
			else
				err = 0;
		}
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

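/* Individual filters cannot be deleted; the filter disappears only when
 * the whole tcf_proto instance is destroyed.
 */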
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	return -EOPNOTSUPP;
}

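/* Walk over the (at most one) filter, honouring the caller's skip/count
 * bookkeeping.
 */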
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

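/* Dump the filter to user space: handle, classid, flags, the attached
 * actions and their statistics.
 */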
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_mall_head *head = fh;
	struct nlattr *nest;

	if (!head)
		return skb->len;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

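/* Classifier operations registered with the TC core under the "matchall"
 * kind.
 */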
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");