// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		peer_addr_ip4;

	struct sock		*sk;
	struct net_device       *dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};

/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;

	struct net_device	*dev;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

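/* Seed for the jhash-based TID/TEI and MS-address lookups; randomised
 * once at module load in gtp_init().
 */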
static u32 gtp_h_initval;

static void pdp_context_delete(struct pdp_ctx *pctx);

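/* Hash the 64-bit GTPv0 TID by mixing its two 32-bit halves. */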
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}

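/* When the device acts as GGSN, decapsulated packets originate at the
 * MS, so the inner source address must match the PDP context; in the
 * SGSN role they are headed for the MS, so the inner destination must
 * match instead.
 */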
static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
				  unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;
	else
		return iph->saddr == pctx->ms_addr_ip4.s_addr;
}

/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			     unsigned int hdrlen, unsigned int role)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	}
	return false;
}

static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
			unsigned int hdrlen, unsigned int role)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
		pctx->dev->stats.rx_length_errors++;
		goto err;
	}

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);

	netif_rx(skb);
	return 0;

err:
	pctx->dev->stats.rx_dropped++;
	return -1;
}

/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

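	/* The GTP version is carried in the top three bits of the flags field. */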
	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these flags is set, all three optional fields are
	 * present, extending the header by four bytes.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

static void __gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	lock_sock(sk);
	gtp = sk->sk_user_data;
	if (gtp) {
		if (gtp->sk0 == sk)
			gtp->sk0 = NULL;
		else
			gtp->sk1u = NULL;
		udp_sk(sk)->encap_type = 0;
		rcu_assign_sk_user_data(sk, NULL);
		sock_put(sk);
	}
	release_sock(sk);
}

static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	__gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	gtp_encap_disable_sock(gtp->sk0);
	gtp_encap_disable_sock(gtp->sk1u);
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}

static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}

static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}

static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
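	/* 16-bit sequence number, drawn from the per-context tx counter. */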
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}

static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2	1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1	1  0  0  0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}

struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}

static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}

static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);

	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
	    mtu < ntohs(iph->tot_len)) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    !net_eq(sock_net(pktinfo.pctx->sk),
					    dev_net(dev)),
				    false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= dev_get_tstats64,
};

static const struct device_type gtp_type = {
	.name = "gtp",
};

static void gtp_link_setup(struct net_device *dev)
{
	unsigned int max_gtp_header_len = sizeof(struct iphdr) +
					  sizeof(struct udphdr) +
					  sizeof(struct gtp0_header);

	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;
	SET_NETDEV_DEVTYPE(dev, &gtp_type);

	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN - max_gtp_header_len;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->needed_headroom	= LL_MAX_HEADER + max_gtp_header_len;
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

static void gtp_destructor(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}

static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
		return -EINVAL;

	gtp = netdev_priv(dev);

	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
		hashsize = 1024;
	} else {
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
		if (!hashsize)
			hashsize = 1024;
	}

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		return err;

	err = gtp_encap_enable(gtp, data);
	if (err < 0)
		goto out_hashtable;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_encap;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
	dev->priv_destructor = gtp_destructor;

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_encap:
	gtp_encap_disable(gtp);
out_hashtable:
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
	return err;
}

static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
		nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL | __GFP_NOWARN);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL | __GFP_NOWARN);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}

static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return NULL;
	}

	sk = sock->sk;
	if (sk->sk_protocol != IPPROTO_UDP ||
	    sk->sk_type != SOCK_DGRAM ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	lock_sock(sk);
	if (sk->sk_user_data) {
		sk = ERR_PTR(-EBUSY);
		goto out_rel_sock;
	}

	sock_hold(sk);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_rel_sock:
	release_sock(sock->sk);
out_sock:
	sockfd_put(sock);
	return sk;
}

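/* IFLA_GTP_FD0/FD1 carry UDP sockets created by userspace, by
 * convention bound to the GTPv0 (3386) and GTPv1-U (2152) ports, which
 * are turned into kernel encap sockets here.
 */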
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;
	unsigned int role = GTP_ROLE_GGSN;

	if (data[IFLA_GTP_FD0]) {
		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN) {
			gtp_encap_disable_sock(sk0);
			gtp_encap_disable_sock(sk1u);
			return -EINVAL;
		}
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;
	gtp->role = role;

	return 0;
}

static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
	struct gtp_dev *gtp = NULL;
	struct net_device *dev;
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (nla[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	if (IS_ERR(net))
		return NULL;

	/* Check if there's an existing gtpX device to configure */
	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
	if (dev && dev->netdev_ops == &gtp_netdev_ops)
		gtp = netdev_priv(dev);

	put_net(net);
	return gtp;
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->peer_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}

static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
				   struct genl_info *info)
{
	struct pdp_ctx *pctx, *pctx_tid = NULL;
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	unsigned int version;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	pctx = ipv4_pdp_find(gtp, ms_addr);
	if (pctx)
		found = true;
	if (version == GTP_V0)
		pctx_tid = gtp0_pdp_find(gtp,
					 nla_get_u64(info->attrs[GTPA_TID]));
	else if (version == GTP_V1)
		pctx_tid = gtp1_pdp_find(gtp,
					 nla_get_u32(info->attrs[GTPA_I_TEI]));
	if (pctx_tid)
		found = true;

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return ERR_PTR(-EEXIST);
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return ERR_PTR(-EOPNOTSUPP);

		if (pctx && pctx_tid)
			return ERR_PTR(-EEXIST);
		if (!pctx)
			pctx = pctx_tid;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return pctx;
	}

	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
	if (pctx == NULL)
		return ERR_PTR(-ENOMEM);

	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead; I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return pctx;
}

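/* Runs after an RCU grace period; drops the socket reference taken in
 * gtp_pdp_add().
 */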
static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}

static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);

static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	rtnl_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	pctx = gtp_pdp_add(gtp, sk, info);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
	} else {
		gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
		err = 0;
	}

out_unlock:
	rtnl_unlock();
	return err;
}

static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID])
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
	}

	return ERR_PTR(-EINVAL);
}

static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx;

	if (nla[GTPA_LINK])
		pctx = gtp_find_pdp_by_link(net, nla);
	else
		pctx = ERR_PTR(-EINVAL);

	if (!pctx)
		pctx = ERR_PTR(-ENOENT);

	return pctx;
}

static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}

static struct genl_family gtp_genl_family;

enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};

static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
				      0, GTP_GENL_MCGRP, GFP_ATOMIC);
	return ret;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
				 0, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}

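/* Dump resume state is kept in cb->args: [0] hash bucket, [1] number of
 * entries to skip within that bucket, [2] the device being walked and
 * [4] a dump-complete flag.
 */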
static int gtp_genl_dump_pdp(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct pdp_ctx *pctx;
	struct gtp_net *gn;

	gn = net_generic(net, gtp_net_id);

	if (cb->args[4])
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = bucket; i < gtp->hash_size; i++) {
			j = 0;
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
						 hlist_tid) {
				if (j >= skip &&
				    gtp_genl_fill_info(skb,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NLM_F_MULTI,
					    cb->nlh->nlmsg_type, pctx)) {
					cb->args[0] = i;
					cb->args[1] = j;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
				j++;
			}
			skip = 0;
		}
		bucket = 0;
	}
	cb->args[4] = 1;
out:
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};

static const struct genl_small_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_new_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_del_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.policy = gtp_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= gtp_genl_ops,
	.n_small_ops	= ARRAY_SIZE(gtp_genl_ops),
	.mcgrps		= gtp_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(gtp_genl_mcgrps),
};

static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}

static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};

static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	pr_err("error loading GTP module loaded\n");
	return err;
}
late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);
	unregister_pernet_subsys(&gtp_net_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");