/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

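/* Parse netlink attributes and either create a new csum action instance
 * or update an existing one; parameters are swapped in via RCU.
 */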
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_old, *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	if (!tcf_idr_check(tn, parm->index, a, bind)) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* don't override defaults */
			return 0;
		tcf_idr_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	p = to_tcf_csum(*a);
	ASSERT_RTNL();

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		if (ret == ACT_P_CREATED)
			tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_old = rtnl_dereference(p->params);

	params_new->action = parm->action;
	params_new->update_flags = parm->update_flags;
	rcu_assign_pointer(p->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

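/* Recompute the ICMP checksum over the ICMP header and payload. */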
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check;
	 * UDPLITE uses udph->len for something else.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check;
	 * UDPLITE uses udph->len for something else.
	 * Use ip6h->payload_len + sizeof(*ip6h) ..., or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

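/* Recompute the SCTP CRC32c checksum; GSO SCTP packets are left untouched. */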
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

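/* Update the checksums requested in update_flags for an IPv4 packet,
 * dispatching on the transport protocol.
 */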
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

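/* Walk a hop-by-hop options header; if a jumbo payload option is found,
 * update *pl with the payload length it carries.
 */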
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

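/* Update the checksums requested in update_flags for an IPv6 packet,
 * walking extension headers until the transport header is reached.
 */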
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

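/* Per-packet action handler: recompute the configured checksums and
 * return the configured verdict, or TC_ACT_SHOT if updating fails.
 */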
static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
		    struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	u32 update_flags;
	int action;

	rcu_read_lock();
	params = rcu_dereference(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = params->action;
	if (unlikely(action == TC_ACT_SHOT))
		goto drop_stats;

	update_flags = params->update_flags;
	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

unlock:
	rcu_read_unlock();
	return action;

drop:
	action = TC_ACT_SHOT;

drop_stats:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	goto unlock;
}

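/* Dump the action parameters and timestamps into a netlink message. */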
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	params = rtnl_dereference(p->params);
	opt.action = params->action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size  = tcf_csum_get_fill_size,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);