/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

19
struct cls_mall_head {
20 21 22
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
23
	u32 flags;
24 25 26 27 28 29 30 31
	struct rcu_head	rcu;
};

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

32
	if (tc_skip_sw(head->flags))
33 34
		return -1;

35
	return tcf_exts_exec(skb, &head->exts, res);
36 37 38 39 40 41 42
}

/* Nothing to set up at init time; tp->root is populated lazily by
 * mall_change(). Always succeeds.
 */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

43
static void mall_destroy_rcu(struct rcu_head *rcu)
44
{
45 46
	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
						  rcu);
47

48 49
	tcf_exts_destroy(&head->exts);
	kfree(head);
50 51
}

52
static int mall_replace_hw_filter(struct tcf_proto *tp,
53
				  struct cls_mall_head *head,
54 55 56 57 58
				  unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};
59
	int err;
60

61
	tc_cls_common_offload_init(&mall_offload.common, tp);
62 63
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
64
	offload.cls_mall->exts = &head->exts;
65 66
	offload.cls_mall->cookie = cookie;

67
	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL,
68
					    &offload);
69 70 71 72
	if (!err)
		head->flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
73 74 75
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
76
				   struct cls_mall_head *head,
77 78 79 80 81 82
				   unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};

83
	tc_cls_common_offload_init(&mall_offload.common, tp);
84 85 86 87 88
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
	offload.cls_mall->exts = NULL;
	offload.cls_mall->cookie = cookie;

89
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &offload);
90 91
}

92
static void mall_destroy(struct tcf_proto *tp)
93 94
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
95
	struct net_device *dev = tp->q->dev_queue->dev;
96

97
	if (!head)
98
		return;
99

100 101
	if (tc_should_offload(dev, tp, head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head);
102

103
	call_rcu(&head->rcu, mall_destroy_rcu);
104 105 106 107
}

/* Lookup by handle is not supported for matchall; always returns 0
 * ("not found"), as in the original upstream implementation.
 */
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

/* Parse and apply the filter parameters: validate/attach actions from
 * @est/@tb, and bind the classid if one was supplied.
 * Returns 0 on success or a negative errno from tcf_exts_validate().
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

/* Create the (single) matchall filter. Replacing an existing filter is
 * not supported: if one is already installed the request fails with
 * -EEXIST. Called under RTNL.
 * Returns 0 on success or a negative errno.
 */
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	/* Only one matchall filter per tp; no in-place replace. */
	if (head)
		return -EEXIST;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
			       mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto err_set_parms;

	if (tc_should_offload(dev, tp, flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
		if (err) {
			/* skip_sw with failed offload is fatal; otherwise
			 * fall back to software-only classification.
			 */
			if (tc_skip_sw(flags))
				goto err_replace_hw_filter;
			else
				err = 0;
		}
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	/* NOTE(review): head is necessarily NULL here (see -EEXIST check
	 * above), so *arg is set to 0 rather than to the new filter; this
	 * matches the upstream code of this era — confirm before changing.
	 */
	*arg = (unsigned long) head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

205
/* Deleting the single filter individually is not supported; the filter
 * goes away only when the whole tp is destroyed (mall_destroy).
 */
static int mall_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
	return -EOPNOTSUPP;
}

/* Walk the (at most one) filter, honouring the caller's skip/count
 * bookkeeping. head may be NULL before the first mall_change(); the
 * callback is expected to cope with a zero fh (mall_dump does).
 */
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;
	if (arg->fn(tp, (unsigned long) head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
225
	struct cls_mall_head *head = (struct cls_mall_head *) fh;
226 227
	struct nlattr *nest;

228
	if (!head)
229 230
		return skb->len;

231
	t->tcm_handle = head->handle;
232 233 234 235 236

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

237 238
	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
239 240
		goto nla_put_failure;

241 242 243
	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

244
	if (tcf_exts_dump(skb, &head->exts))
245 246 247 248
		goto nla_put_failure;

	nla_nest_end(skb, nest);

249
	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

/* Classifier ops table registered with the tc core under kind "matchall". */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};

/* Module init: register the matchall classifier with the tc core. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

/* Module exit: unregister the classifier. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");