/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
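
/*
 * Example usage (a sketch, assuming an iproute2 build with eBPF support;
 * the object file name and section name are placeholders):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf obj cls.o sec classifier da
 *
 * "da" requests direct-action mode, i.e. TCA_BPF_FLAG_ACT_DIRECT below.
 */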

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	TCA_CLS_FLAGS_SKIP_HW

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

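/* Sanitize the return code of a direct-action program: known TC verdicts
 * pass through unchanged, anything else degrades to TC_ACT_UNSPEC.
 */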
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

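/* Classification fast path: run each attached program over the skb under
 * the RCU read lock until one returns a verdict. At ingress the MAC
 * header is pushed back first so the program sees the packet starting
 * at the link-layer header.
 */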
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

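/* Only classic BPF keeps a copy of its opcodes around in bpf_ops, so a
 * NULL bpf_ops identifies an eBPF program loaded via fd.
 */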
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

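/* Send a single add/replace/destroy command for this program to the
 * device through its ndo_setup_tc() hook.
 */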
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

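/* Work out the hardware transition for an add/replace: replace the old
 * program in place, destroy it if offloading is no longer possible, or
 * add the new one from scratch.
 */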
static void cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			    struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return;
		cmd = TC_CLSBPF_ADD;
	}

	if (cls_bpf_offload_cmd(tp, obj, cmd))
		return;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;
}

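/* Remove the program from hardware, but only if it was actually
 * offloaded in the first place.
 */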
static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

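/* Final teardown of a filter: release the BPF program and all memory
 * attached to it. Reached via the RCU callback below.
 */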
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		cls_bpf_stop_offload(tp, prog);
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

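/* Build a classic BPF filter from an opcode array passed via netlink and
 * hand it to bpf_prog_create(), which validates (and may JIT) it.
 */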
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

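/* Take a reference on an eBPF program that user space already loaded via
 * bpf(2), identified by fd, and make sure it is a SCHED_CLS program.
 */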
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

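/* Common configuration path for both new and replaced filters: validate
 * actions and flags, then load either the classic or the extended BPF
 * program described by the netlink attributes.
 */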
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

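/* Generate an unused handle in [1, 0x7FFFFFFE] by advancing hgen; gives
 * up and returns 0 once all attempts are exhausted.
 */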
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

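/* Create or replace a filter instance: parse netlink options, allocate
 * the new program, configure it, offer it to hardware and finally swap
 * it into the list under RCU.
 */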
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	cls_bpf_offload(tp, prog, oldprog);

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

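/* Dump one filter back to user space, mirroring the attributes accepted
 * by cls_bpf_change().
 */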
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);