/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
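
/* Example of attaching this classifier from user space (illustrative only;
 * the iproute2 invocation, object file name and section name below are
 * assumptions, not something defined in this file):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf obj prog.o sec classifier direct-action
 */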

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

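/* Map the verdict returned by a direct-action BPF program onto a tc action
 * code; anything unrecognized is treated as TC_ACT_UNSPEC.
 */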
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

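/* Classification fast path: walk the filter list under RCU, run each BPF
 * program on the skb (pushing the MAC header back on ingress so the program
 * sees it), and resolve the verdict either through the integrated
 * (direct-action) path or through classid/exts.
 */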
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

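/* Send a single classifier offload command (add/replace/destroy/stats) to
 * the underlying device via ndo_setup_tc().
 */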
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

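/* Decide whether the hardware offload state needs to be added, replaced or
 * destroyed when a filter is (re)installed.  With skip_sw set, an offload
 * failure is fatal; otherwise the software path silently takes over.
 */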
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	__cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

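/* Build a classic BPF filter from the TCA_BPF_OPS{,_LEN} attributes and
 * translate it into an internal bpf_prog via bpf_prog_create().
 */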
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

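/* Take over an already loaded eBPF program referenced by file descriptor
 * (TCA_BPF_FD); the program must be of type BPF_PROG_TYPE_SCHED_CLS.
 */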
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

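/* Validate the netlink attributes, set up actions/flags and load either the
 * classic or the extended BPF program into @prog.
 */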
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

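/* Pick an unused filter handle, probing linearly from the last generated
 * value; returns 0 if no free handle could be found.
 */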
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

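/* Netlink change/replace entry point: parse TCA_OPTIONS, allocate and set up
 * a new cls_bpf_prog, attempt hardware offload and then either RCU-replace
 * the old filter or add the new one to the list.
 */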
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

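/* Dump helpers: report either the raw classic BPF instructions or the eBPF
 * name and program digest back to user space.
 */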
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

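/* Classifier registration: exposed to the tc core under the name "bpf". */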
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);