/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

19
struct cls_mall_head {
20 21 22
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
23
	u32 flags;
24 25 26 27 28 29 30 31
	struct rcu_head	rcu;
};

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

32
	if (tc_skip_sw(head->flags))
33 34
		return -1;

35
	return tcf_exts_exec(skb, &head->exts, res);
36 37 38 39 40 41 42
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

43
static void mall_destroy_rcu(struct rcu_head *rcu)
44
{
45 46
	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
						  rcu);
47

48 49
	tcf_exts_destroy(&head->exts);
	kfree(head);
50 51
}

52
static int mall_replace_hw_filter(struct tcf_proto *tp,
53
				  struct cls_mall_head *head,
54 55 56 57 58 59 60 61 62
				  unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};

	offload.type = TC_SETUP_MATCHALL;
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
63
	offload.cls_mall->exts = &head->exts;
64 65 66 67 68 69 70
	offload.cls_mall->cookie = cookie;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					     &offload);
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
71
				   struct cls_mall_head *head,
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
				   unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};

	offload.type = TC_SETUP_MATCHALL;
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
	offload.cls_mall->exts = NULL;
	offload.cls_mall->cookie = cookie;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					     &offload);
}

88 89 90
static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
91
	struct net_device *dev = tp->q->dev_queue->dev;
92

93 94
	if (!head)
		return true;
95

96 97
	if (tc_should_offload(dev, tp, head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head);
98

99
	call_rcu(&head->rcu, mall_destroy_rcu);
100 101 102 103 104
	return true;
}

static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
105
	return 0UL;
106 107 108 109 110 111 112 113
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
114
			  struct cls_mall_head *head,
115 116 117 118 119 120
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

121 122 123
	err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
	if (err)
		return err;
124 125
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
126
		goto errout;
127 128

	if (tb[TCA_MATCHALL_CLASSID]) {
129 130
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
131 132
	}

133
	tcf_exts_change(tp, &head->exts, &e);
134 135

	return 0;
136 137 138
errout:
	tcf_exts_destroy(&e);
	return err;
139 140 141 142 143 144 145 146
}

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
147
	struct net_device *dev = tp->q->dev_queue->dev;
148
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
149
	struct cls_mall_head *new;
150
	u32 flags = 0;
151 152 153 154 155
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

156 157
	if (head)
		return -EEXIST;
158 159 160 161 162 163

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
			       tca[TCA_OPTIONS], mall_policy);
	if (err < 0)
		return err;

164 165 166 167 168 169
	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

170 171
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
172 173
		return -ENOBUFS;

174
	err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
175 176
	if (err)
		goto err_exts_init;
177 178 179

	if (!handle)
		handle = 1;
180 181
	new->handle = handle;
	new->flags = flags;
182

183
	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
184
	if (err)
185
		goto err_set_parms;
186

187
	if (tc_should_offload(dev, tp, flags)) {
188
		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
189 190
		if (err) {
			if (tc_skip_sw(flags))
191
				goto err_replace_hw_filter;
192 193 194 195 196
			else
				err = 0;
		}
	}

197 198 199 200
	*arg = (unsigned long) head;
	rcu_assign_pointer(tp->root, new);
	if (head)
		call_rcu(&head->rcu, mall_destroy_rcu);
201 202
	return 0;

203 204
err_replace_hw_filter:
err_set_parms:
205
	tcf_exts_destroy(&new->exts);
206
err_exts_init:
207
	kfree(new);
208 209 210 211 212
	return err;
}

static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
213
	return -EOPNOTSUPP;
214 215 216 217 218 219 220 221
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;
222
	if (arg->fn(tp, (unsigned long) head, arg) < 0)
223 224 225 226 227 228 229 230
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
231
	struct cls_mall_head *head = (struct cls_mall_head *) fh;
232 233
	struct nlattr *nest;

234
	if (!head)
235 236
		return skb->len;

237
	t->tcm_handle = head->handle;
238 239 240 241 242

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

243 244
	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
245 246
		goto nla_put_failure;

247
	if (tcf_exts_dump(skb, &head->exts))
248 249 250 251
		goto nla_put_failure;

	nla_nest_end(skb, nest);

252
	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");