/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))

/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	struct list_head	sock_list;
};

static int geneve_net_id;

union geneve_addr {
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	struct sockaddr sa;
};

static union geneve_addr geneve_remote_unspec = { .sa.sa_family = AF_UNSPEC, };

/* Pseudo network device */
struct geneve_dev {
	struct hlist_node  hlist;	/* vni hash table */
	struct net	   *net;	/* netns for packet i/o */
	struct net_device  *dev;	/* netdev for geneve tunnel */
	struct geneve_sock __rcu *sock4;	/* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock __rcu *sock6;	/* IPv6 socket used for geneve tunnel */
#endif
	u8                 vni[3];	/* virtual network ID for tunnel */
	u8                 ttl;		/* TTL override */
	u8                 tos;		/* TOS override */
	union geneve_addr  remote;	/* IP address for link partner */
	struct list_head   next;	/* geneve's per namespace list */
	__be32		   label;	/* IPv6 flowlabel override */
	__be16		   dst_port;
	bool		   collect_md;
	struct gro_cells   gro_cells;
	u32		   flags;
	struct dst_cache   dst_cache;
};

/* Geneve device flags */
#define GENEVE_F_UDP_ZERO_CSUM_TX	BIT(0)
#define GENEVE_F_UDP_ZERO_CSUM6_TX	BIT(1)
#define GENEVE_F_UDP_ZERO_CSUM6_RX	BIT(2)

struct geneve_sock {
	bool			collect_md;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
	u32			flags;
};

static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}

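/* Convert the 24-bit VNI from the Geneve header into a 64-bit tunnel ID,
 * keeping the result in network byte order (the inverse of
 * tunnel_id_to_vni() below).
 */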
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}

static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
	return gs->sock->sk->sk_family;
}

static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    addr == geneve->remote.sin.sin_addr.s_addr)
			return geneve;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
					 struct in6_addr addr6, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    ipv6_addr_equal(&addr6, &geneve->remote.sin6.sin6_addr))
			return geneve;
	}
	return NULL;
}
#endif

static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}

static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
					    struct sk_buff *skb)
{
	u8 *vni;
	__be32 addr;
	static u8 zero_vni[3];
#if IS_ENABLED(CONFIG_IPV6)
	static struct in6_addr zero_addr6;
#endif

	if (geneve_get_sk_family(gs) == AF_INET) {
		struct iphdr *iph;

		iph = ip_hdr(skb); /* outer IP header... */

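		/* On a collect_md socket every packet maps to the single
		 * metadata device, which is hashed under an all-zeroes
		 * VNI and remote address.
		 */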
		if (gs->collect_md) {
			vni = zero_vni;
			addr = 0;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr = iph->saddr;
		}

		return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (geneve_get_sk_family(gs) == AF_INET6) {
		struct ipv6hdr *ip6h;
		struct in6_addr addr6;

		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr6 = zero_addr6;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr6 = ip6h->saddr;
		}

		return geneve6_lookup(gs, addr6, vni);
#endif
	}
	return NULL;
}

/* geneve receive/decap routine */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
		      struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	int err = 0;
	void *oiph;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (geneve_get_sk_family(gs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (geneve_get_sk_family(gs) == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
#endif
		}
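		/* A return value greater than one means the outer header
		 * was CE-marked but the inner packet is not ECN-capable,
		 * so the frame has to be dropped.
		 */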
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&geneve->gro_cells, skb);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = dst_cache_init(&geneve->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		gro_cells_destroy(&geneve->gro_cells);
		return err;
	}

	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	dst_cache_destroy(&geneve->dst_cache);
	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_dev *geneve;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto drop;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto drop;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve = geneve_lookup_skb(gs, skb);
	if (!geneve)
		goto drop;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB),
				 !net_eq(geneve->net, dev_net(geneve->dev))))
		goto drop;

	geneve_rx(geneve, gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.ipv6_v6only = 1;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & GENEVE_F_UDP_ZERO_CSUM6_RX);
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}

static struct sk_buff **geneve_gro_receive(struct sock *sk,
					   struct sk_buff **head,
					   struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	for (p = *head; p; p = p->next) {
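		/* Mark flows whose Geneve header (including options) differs
		 * from this packet's as not belonging to the same flow.
		 */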
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
			       int nhoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + gh_len);

	return err;
}

/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6, u32 flags)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.gro_receive = geneve_gro_receive;
	tunnel_cfg.gro_complete = geneve_gro_complete;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}

static void __geneve_sock_release(struct geneve_sock *gs)
{
	if (!gs || --gs->refcnt)
		return;

	list_del(&gs->list);
	udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}

static void geneve_sock_release(struct geneve_dev *geneve)
{
	struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);

	rcu_assign_pointer(geneve->sock6, NULL);
#endif

	rcu_assign_pointer(geneve->sock4, NULL);
	synchronize_net();

	__geneve_sock_release(gs4);
#if IS_ENABLED(CONFIG_IPV6)
	__geneve_sock_release(gs6);
#endif
}

static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    sa_family_t family,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    geneve_get_sk_family(gs) == family) {
			return gs;
		}
	}
	return NULL;
}

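/* Attach the device to a shared UDP socket for this address family and
 * destination port, creating the socket if none exists yet, and hash the
 * device into the socket's VNI table.
 */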
static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
{
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u32 hash;

	gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->dst_port);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->dst_port, ipv6, geneve->flags);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
	gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6)
		rcu_assign_pointer(geneve->sock6, gs);
	else
#endif
		rcu_assign_pointer(geneve->sock4, gs);

	hash = geneve_net_vni_hash(geneve->vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}

static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	bool ipv6 = geneve->remote.sa.sa_family == AF_INET6;
	bool metadata = geneve->collect_md;
	int ret = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6 || metadata)
		ret = geneve_sock_add(geneve, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = geneve_sock_add(geneve, false);
	if (ret < 0)
		geneve_sock_release(geneve);

	return ret;
}

static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(geneve);
	return 0;
}

static void geneve_build_header(struct genevehdr *geneveh,
				__be16 tun_flags, u8 vni[3],
				u8 options_len, u8 *options)
{
	geneveh->ver = GENEVE_VER;
	geneveh->opt_len = options_len / 4;
	geneveh->oam = !!(tun_flags & TUNNEL_OAM);
	geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	geneveh->rsvd1 = 0;
	memcpy(geneveh->vni, vni, 3);
	geneveh->proto_type = htons(ETH_P_TEB);
	geneveh->rsvd2 = 0;

	memcpy(geneveh->options, options, options_len);
}

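/* Prepend the Geneve header (and any options) to the skb and prepare it
 * for UDP checksum offload; the route is released on failure.
 */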
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			    u32 flags, bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;
	bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM_TX);

	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_rt;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_rt;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_rt:
	ip_rt_put(rt);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
			     __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			     u32 flags, bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;
	bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM6_TX);

	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}
#endif

static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
				       struct net_device *dev,
				       struct flowi4 *fl4,
				       struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_cache *dst_cache;
	struct rtable *rt = NULL;
	__u8 tos;

	if (!rcu_dereference(geneve->sock4))
		return ERR_PTR(-EIO);

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;

	if (info) {
		fl4->daddr = info->key.u.ipv4.dst;
		fl4->saddr = info->key.u.ipv4.src;
		fl4->flowi4_tos = RT_TOS(info->key.tos);
		dst_cache = &info->dst_cache;
	} else {
		tos = geneve->tos;
		if (tos == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			tos = ip_tunnel_get_dsfield(iip, skb);
			use_cache = false;
		}

		fl4->flowi4_tos = RT_TOS(tos);
		fl4->daddr = geneve->remote.sin.sin_addr.s_addr;
		dst_cache = &geneve->dst_cache;
	}

	if (use_cache) {
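		/* Reuse a previously resolved route when possible;
		 * dst_cache_get_ip4() also restores the cached source
		 * address into fl4->saddr.
		 */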
		rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
		if (rt)
			return rt;
	}

	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
	if (use_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
	return rt;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
					   struct net_device *dev,
					   struct flowi6 *fl6,
					   struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_entry *dst = NULL;
	struct dst_cache *dst_cache;
	struct geneve_sock *gs6;
	__u8 prio;

	gs6 = rcu_dereference(geneve->sock6);
	if (!gs6)
		return ERR_PTR(-EIO);

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_proto = IPPROTO_UDP;

	if (info) {
		fl6->daddr = info->key.u.ipv6.dst;
		fl6->saddr = info->key.u.ipv6.src;
		fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos),
						   info->key.label);
		dst_cache = &info->dst_cache;
	} else {
		prio = geneve->tos;
		if (prio == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			prio = ip_tunnel_get_dsfield(iip, skb);
			use_cache = false;
		}

		fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
						   geneve->label);
		fl6->daddr = geneve->remote.sin6.sin6_addr;
		dst_cache = &geneve->dst_cache;
	}

	if (use_cache) {
		dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
		if (dst)
			return dst;
	}

	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (dst->dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
		dst_release(dst);
		return ERR_PTR(-ELOOP);
	}

	if (use_cache)
		dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
	return dst;
}
#endif

/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}

static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
				   struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs4;
	struct rtable *rt = NULL;
	const struct iphdr *iip; /* interior IP header */
	int err = -EINVAL;
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	__be16 df;
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	u32 flags = geneve->flags;

	gs4 = rcu_dereference(geneve->sock4);
	if (!gs4)
		goto tx_error;

	if (geneve->collect_md) {
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
		if (info && ip_tunnel_info_af(info) != AF_INET)
			goto tx_error;
	}

	rt = geneve_get_v4_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto tx_error;
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (info->options_len)
			opts = ip_tunnel_info_opts(info);

		if (key->tun_flags & TUNNEL_CSUM)
			flags &= ~GENEVE_F_UDP_ZERO_CSUM_TX;
		else
			flags |= GENEVE_F_UDP_ZERO_CSUM_TX;

		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, flags, xnet);
		if (unlikely(err))
			goto tx_error;

		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	} else {
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, flags, xnet);
		if (unlikely(err))
			goto tx_error;

		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
			ttl = 1;
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		df = 0;
	}
	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
			    tos, ttl, df, sport, geneve->dst_port,
			    !net_eq(geneve->net, dev_net(geneve->dev)),
			    !!(flags & GENEVE_F_UDP_ZERO_CSUM_TX));

	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
				    struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct dst_entry *dst = NULL;
	const struct iphdr *iip; /* interior IP header */
	struct geneve_sock *gs6;
	int err = -EINVAL;
	struct flowi6 fl6;
	__u8 prio, ttl;
	__be16 sport;
	__be32 label;
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	u32 flags = geneve->flags;

	gs6 = rcu_dereference(geneve->sock6);
	if (!gs6)
		goto tx_error;

	if (geneve->collect_md) {
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
	}

	dst = geneve_get_v6_dst(skb, dev, &fl6, info);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto tx_error;
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (info->options_len)
			opts = ip_tunnel_info_opts(info);

		if (key->tun_flags & TUNNEL_CSUM)
			flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
		else
			flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;

		err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
					info->options_len, opts,
					flags, xnet);
		if (unlikely(err))
			goto tx_error;

		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		label = info->key.label;
	} else {
		err = geneve6_build_skb(dst, skb, 0, geneve->vni,
					0, NULL, flags, xnet);
		if (unlikely(err))
			goto tx_error;

		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
					   iip, skb);
		ttl = geneve->ttl;
		if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
			ttl = 1;
		ttl = ttl ? : ip6_dst_hoplimit(dst);
		label = geneve->label;
	}

	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
			     &fl6.saddr, &fl6.daddr, prio, ttl, label,
			     sport, geneve->dst_port,
			     !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
#endif

static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;

	if (geneve->collect_md)
		info = skb_tunnel_info(skb);

#if IS_ENABLED(CONFIG_IPV6)
	if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
	    (!info && geneve->remote.sa.sa_family == AF_INET6))
		return geneve6_xmit_skb(skb, dev, info);
#endif
	return geneve_xmit_skb(skb, dev, info);
}

static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Only possible if called internally, ndo_change_mtu path's new_mtu
	 * is guaranteed to be between dev->min_mtu and dev->max_mtu.
	 */
	if (new_mtu > dev->max_mtu)
		new_mtu = dev->max_mtu;

	dev->mtu = new_mtu;
	return 0;
}

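/* ndo_fill_metadata_dst: resolve the route a collect_md packet would take
 * so the source address and UDP ports can be pre-filled into the skb's
 * tunnel metadata.
 */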
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct rtable *rt;
	struct flowi4 fl4;
#if IS_ENABLED(CONFIG_IPV6)
	struct dst_entry *dst;
	struct flowi6 fl6;
#endif

	if (ip_tunnel_info_af(info) == AF_INET) {
		rt = geneve_get_v4_rt(skb, dev, &fl4, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = fl4.saddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		dst = geneve_get_v6_dst(skb, dev, &fl6, info);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = fl6.saddr;
#endif
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(geneve->net, skb,
					     1, USHRT_MAX, true);
	info->key.tp_dst = geneve->dst_port;
	return 0;
}

static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= geneve_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};

/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening GENEVE udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void geneve_push_rx_ports(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;

	rcu_read_lock();
	list_for_each_entry_rcu(gs, &gn->sock_list, list)
		udp_tunnel_push_rx_port(dev, gs->sock,
					UDP_TUNNEL_TYPE_GENEVE);
	rcu_read_unlock();
}

/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features    |= NETIF_F_LLTX;
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	/* MTU range: 68 - (something less than 65535) */
	dev->min_mtu = ETH_MIN_MTU;
	/* The max_mtu calculation does not take account of GENEVE
	 * options, to avoid excluding potentially valid
	 * configurations. This will be further reduced by IPvX hdr size.
	 */
	dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

	netif_keep_dst(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_REMOTE6]		= { .len = sizeof(struct in6_addr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_LABEL]		= { .type = NLA_U32 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GENEVE_UDP_CSUM]		= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
};

static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni =  nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
			return -ERANGE;
	}

	return 0;
}

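/* Look up an existing geneve device matching port, remote and VNI; as a
 * side effect report whether any device already uses this UDP port and
 * whether that device runs in collect_md mode.
 */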
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  __be16 dst_port,
					  union geneve_addr *remote,
					  u8 vni[],
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	t = NULL;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (geneve->dst_port == dst_port) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    !memcmp(remote, &geneve->remote, sizeof(geneve->remote)) &&
		    dst_port == geneve->dst_port)
			t = geneve;
	}
	return t;
}

static int geneve_configure(struct net *net, struct net_device *dev,
			    union geneve_addr *remote,
			    __u32 vni, __u8 ttl, __u8 tos, __be32 label,
			    __be16 dst_port, bool metadata, u32 flags)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err, encap_len;

	if (!remote)
		return -EINVAL;
	if (metadata &&
	    (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl || label))
		return -EINVAL;

	geneve->net = net;
	geneve->dev = dev;

	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
	geneve->vni[2] =  vni & 0x000000ff;

	if ((remote->sa.sa_family == AF_INET &&
	     IN_MULTICAST(ntohl(remote->sin.sin_addr.s_addr))) ||
	    (remote->sa.sa_family == AF_INET6 &&
	     ipv6_addr_is_multicast(&remote->sin6.sin6_addr)))
		return -EINVAL;
	if (label && remote->sa.sa_family != AF_INET6)
		return -EINVAL;

	geneve->remote = *remote;

	geneve->ttl = ttl;
	geneve->tos = tos;
	geneve->label = label;
	geneve->dst_port = dst_port;
	geneve->collect_md = metadata;
	geneve->flags = flags;

	t = geneve_find_dev(gn, dst_port, remote, geneve->vni,
			    &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	/* make enough headroom for basic scenario */
	encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
	if (remote->sa.sa_family == AF_INET) {
		encap_len += sizeof(struct iphdr);
		dev->max_mtu -= sizeof(struct iphdr);
	} else {
		encap_len += sizeof(struct ipv6hdr);
		dev->max_mtu -= sizeof(struct ipv6hdr);
	}
	dev->needed_headroom = encap_len + ETH_HLEN;

	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

P

1317 1318 1319 1320
	err = register_netdevice(dev);
	if (err)
		return err;

1321 1322 1323 1324 1325 1326 1327
	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}

static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	__be16 dst_port = htons(GENEVE_UDP_PORT);
	__u8 ttl = 0, tos = 0;
	bool metadata = false;
	union geneve_addr remote = geneve_remote_unspec;
	__be32 label = 0;
	__u32 vni = 0;
	u32 flags = 0;

	if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
		return -EINVAL;

	if (data[IFLA_GENEVE_REMOTE]) {
		remote.sa.sa_family = AF_INET;
		remote.sin.sin_addr.s_addr =
			nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
	}

	if (data[IFLA_GENEVE_REMOTE6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		remote.sa.sa_family = AF_INET6;
		remote.sin6.sin6_addr =
			nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);

		if (ipv6_addr_type(&remote.sin6.sin6_addr) &
		    IPV6_ADDR_LINKLOCAL) {
			netdev_dbg(dev, "link-local remote is unsupported\n");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_ID])
		vni = nla_get_u32(data[IFLA_GENEVE_ID]);

	if (data[IFLA_GENEVE_TTL])
		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_LABEL])
		label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
			IPV6_FLOWLABEL_MASK;

	if (data[IFLA_GENEVE_PORT])
		dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);

	if (data[IFLA_GENEVE_COLLECT_METADATA])
		metadata = true;

	if (data[IFLA_GENEVE_UDP_CSUM] &&
	    !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
		flags |= GENEVE_F_UDP_ZERO_CSUM_TX;

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
		flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
		flags |= GENEVE_F_UDP_ZERO_CSUM6_RX;

	return geneve_configure(net, dev, &remote, vni, ttl, tos, label,
				dst_port, metadata, flags);
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be32)) +  /* IFLA_GENEVE_LABEL */
		nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
		0;
}

static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	__u32 vni;

	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (geneve->remote.sa.sa_family == AF_INET) {
		if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
				    geneve->remote.sin.sin_addr.s_addr))
			goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
				     &geneve->remote.sin6.sin6_addr))
			goto nla_put_failure;
#endif
	}

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos) ||
	    nla_put_be32(skb, IFLA_GENEVE_LABEL, geneve->label))
		goto nla_put_failure;

	if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
		       !(geneve->flags & GENEVE_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
		       !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
		       !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_RX)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};

struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = geneve_configure(net, dev, &geneve_remote_unspec,
			       0, 0, 0, 0, htons(dst_port), true,
			       GENEVE_F_UDP_ZERO_CSUM6_RX);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = geneve_change_mtu(dev, IP_MAX_MTU);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;

 err:
	geneve_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);

static int geneve_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		geneve_push_rx_ports(dev);

	return NOTIFY_DONE;
}

static struct notifier_block geneve_notifier_block __read_mostly = {
	.notifier_call = geneve_netdevice_event,
};

static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}

static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};

static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&geneve_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out3;

	return 0;

out3:
	unregister_netdevice_notifier(&geneve_notifier_block);
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_netdevice_notifier(&geneve_notifier_block);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");