/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
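
/* Example usage from userspace (illustrative only: the device name and
 * BPF object file are placeholders, and the exact syntax depends on the
 * iproute2 version):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 * "da" selects direct-action mode (TCA_BPF_FLAG_ACT_DIRECT below), in
 * which the program's return value is taken as the TC verdict itself.
 */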

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

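/* Sanitize the verdict returned by a direct-action program; any opcode
 * we do not recognize is treated as TC_ACT_UNSPEC, i.e. "keep looking".
 */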
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

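/* Main classification path: run each attached program over the skb in
 * list order. At ingress the MAC header is temporarily pushed back so
 * the program sees the full packet. In direct-action mode the program's
 * return value is the verdict; otherwise a non-zero result selects the
 * classid (or prog->res for -1) and the configured actions are executed.
 */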
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

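/* Send a single offload command (add/replace/destroy/stats) to the
 * underlying device through its ndo_setup_tc() hook.
 */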
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

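/* Decide how to reprogram the hardware for an add or replace: replace
 * the currently offloaded program, destroy it if the new program cannot
 * be offloaded, or add a fresh one. Failures are only fatal when the
 * software path is excluded via TCA_CLS_FLAGS_SKIP_SW.
 */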
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

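/* Delete one filter: unlink it, then free it after an RCU grace period
 * so that concurrent readers in cls_bpf_classify() are not disturbed.
 */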
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

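/* Destroy the whole classifier instance; refused (returns false) if
 * filters are still attached and destruction was not forced.
 */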
static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		cls_bpf_stop_offload(tp, prog);
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

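/* Classic BPF: copy the sock_filter array from the netlink attributes
 * and turn it into an internal program via bpf_prog_create().
 */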
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

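/* eBPF: grab a reference to an already loaded BPF_PROG_TYPE_SCHED_CLS
 * program by file descriptor and remember its name for later dumps.
 */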
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

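/* Pick an unused handle for an auto-assigned filter. hgen cycles through
 * 1..0x7FFFFFFE; after 0x80000000 occupied probes we give up and return
 * 0, which the caller treats as an error.
 */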
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

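/* Create or replace a filter: parse the netlink attributes, build the
 * new program, try to offload it, and finally publish it on the RCU
 * protected list, retiring the old program on replace.
 */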
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		cls_bpf_delete_prog(tp, prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

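/* Dump helpers: emit either the classic BPF insn array or the eBPF
 * fd/name pair, matching how the program was originally loaded.
 */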
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

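/* Fill a netlink dump message with this filter's configuration,
 * refreshing hardware counters first if the program is offloaded.
 */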
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);