act_skbedit.c
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */
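
/* Illustrative userspace usage via tc(8) (example only; device name and
 * addresses are placeholders):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *       match ip dst 192.168.0.3 \
 *       action skbedit queue_mapping 3
 *
 * steers matching packets to TX queue 3 through this action.
 */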

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

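/* Datapath handler: apply the configured edits (priority, DS-field
 * inheritance, queue mapping, mark, packet type) to the skb under the
 * per-action lock, then return the configured control action.
 */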
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	spin_lock(&d->tcf_lock);
	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_INHERITDSFIELD) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
			break;
		}
	}
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~d->mask;
		skb->mark |= d->mark & d->mask;
	}
	if (d->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = d->ptype;

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;

err:
	spin_unlock(&d->tcf_lock);
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}

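/* Netlink attribute policy: bounds the expected size of each
 * TCA_SKBEDIT_* attribute before tcf_skbedit_init() reads it.
 */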
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
};

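/* Control path: parse the netlink attributes, then create a new action
 * instance or (with 'ovr') update an existing one found in the IDR.
 */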
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    int ovr, int bind, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	bool exists = false;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_skbedit_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}

	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		d->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		d->ptype = *ptype;
	/* default behaviour is to use all the bits */
	d->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		d->mask = *mask;

	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
}

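/* Dump the action's current configuration and timers back to userspace
 * as netlink attributes.
 */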
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;
	u64 pure_flags = 0;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
		goto nla_put_failure;
	if (d->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (pure_flags != 0 &&
	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

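/* walk, lookup and delete are thin wrappers around the generic
 * per-netns action IDR helpers.
 */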
static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      const struct tc_action_ops *ops,
			      struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
			      struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_idr_search(tn, a, index);
}

static int tcf_skbedit_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_idr_delete_index(tn, index);
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.type		=	TCA_ACT_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.walk		=	tcf_skbedit_walker,
	.lookup		=	tcf_skbedit_search,
	.delete		=	tcf_skbedit_delete,
	.size		=	sizeof(struct tcf_skbedit),
};

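/* Per-network-namespace setup and teardown of the action's IDR state. */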
static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, skbedit_net_id);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);