/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
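
/* Editorial sketch (not from the original file) of how the per-CPU fifo is
 * driven; 'fifo' stands for this CPU's action_fifo instance and 'handle' is
 * a hypothetical consumer:
 *
 *	struct deferred_action *da;
 *
 *	da = action_fifo_put(fifo);		// reserve the slot at 'head'
 *	if (da)
 *		da->skb = skb;			// record the deferred work
 *	...
 *	while ((da = action_fifo_get(fifo)))	// drain entries from 'tail'
 *		handle(da);
 *
 * head/tail only ever advance while one packet is processed;
 * action_fifo_init() resets them before the next packet.
 */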

/* Return the new fifo entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}
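
/* Editorial note: actions that rewrite the packet beyond what the cached
 * flow key describes zero key->eth.type via invalidate_flow_key(); before
 * the key is trusted again it must be re-extracted, e.g. (sketch):
 *
 *	if (!is_flow_key_valid(key))
 *		err = ovs_flow_key_update(skb, key);
 */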

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
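
/* Resulting frame layout after push_mpls(), for reference (editorial):
 *
 *	| Ethernet header | new MPLS LSE (4 bytes) | original L3 packet |
 *
 * The CHECKSUM_COMPLETE branch folds the freshly written LSE into
 * skb->csum, since the new header now contributes to the packet contents.
 */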

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
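
/* Editorial worked example of the masked-set semantics, assuming the
 * OVS_MASKED() definition in the OVS flow headers, where the key has
 * already been ANDed with the mask by userspace:
 *
 *	old = 0xAABB, key = 0x0022 (pre-masked), mask = 0x00FF
 *	OVS_MASKED(old, key, mask) == key | (old & ~mask)
 *	                           == 0x0022 | 0xAA00 == 0xAA22
 *
 * i.e. only the masked bits are replaced; the rest keep the packet's
 * original values.
 */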

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
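
/* Editorial note: the rewrites above are incremental checksum updates in
 * the style of RFC 1624 -- csum_replace4() and inet_proto_csum_replace4()
 * fold only the difference between the old and new 32-bit word into the
 * stored checksums, roughly HC' = ~(~HC + ~old + new), instead of
 * recomputing over the whole packet.
 */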

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
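
/* Editorial note on the destination rewrite above: when an IPv6 routing
 * header is present, the transport checksum is computed against the final
 * destination rather than the one in the fixed header, so recalc_csum is
 * cleared if ipv6_find_hdr() reports NEXTHDR_ROUTING before the payload.
 */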

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * them individually.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}
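
/* Editorial note: a zero UDP checksum means "no checksum", so when the
 * incremental update above happens to produce 0 the code stores
 * CSUM_MANGLED_0 (0xffff), the mathematically equivalent on-wire value.
 */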

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
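
/* Editorial note on the XOR above: if the packet arrived with a valid
 * CRC32c, old_csum ^ old_correct_csum == 0 and the new checksum is stored
 * verbatim; if it arrived corrupted, the same error pattern is carried
 * into the rewritten packet instead of being silently repaired.
 */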

static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
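
/* Editorial note: ip_do_fragment() and its IPv6 counterpart derive the
 * fragment size from the skb's dst, so ovs_fragment() below installs a
 * stack-allocated, non-refcounted dst (DST_NOCOUNT via skb_dst_set_noref)
 * whose ->mtu hook simply reports the egress device MTU, then restores
 * the original dst once fragmentation completes.
 */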

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
			 __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		return;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			kfree_skb(skb);
			return;
		}

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		kfree_skb(skb);
	}
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}
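
/* Editorial note on do_output(): packets reassembled by conntrack carry
 * the maximum received unit in OVS_CB(skb)->mru.  A packet that exceeds
 * mru + ETH_HLEN is re-fragmented via ovs_fragment() when mru fits the
 * egress device MTU, and dropped otherwise.
 */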

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ip_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
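
/* Editorial note: OVS_SAMPLE_ATTR_PROBABILITY is a uniform u32 threshold
 * compared against prandom_u32(), so userspace encodes e.g. a ~50% sample
 * rate as roughly 0x80000000 and "always" as 0xffffffff; 0 disables
 * sampling entirely.
 */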

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
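
/* Editorial summary: with OVS_HASH_ALG_L4 (the only algorithm), the flow
 * hash delivered to userspace is jhash_1word(skb_get_hash(skb), basis),
 * with 0 remapped to 0x1 so that "no hash" stays distinguishable.
 */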

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);

		/* FIXME: Remove when all vports have been converted */
		OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;

		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
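
/* Editorial illustration of the masked-set attribute layout: the netlink
 * payload carries the value immediately followed by an equally sized mask,
 * so '+ 1' on a typed pointer lands on the mask half:
 *
 *	nla_data(a) ->	+----------------------+
 *			| struct ovs_key_ipv4  |   value
 *	get_mask(a, struct ovs_key_ipv4 *) ->
 *			+----------------------+
 *			| struct ovs_key_ipv4  |   mask
 *			+----------------------+
 */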

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABEL:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
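
/* Editorial note: recirculation is deliberately not recursive.  The packet
 * (or a clone) is queued on this CPU's action_fifo with the new recirc_id
 * stored in its cloned key, and the outermost ovs_execute_actions() call
 * drains the queue afterwards, bounding stack depth and capping in-flight
 * recirculations at DEFERRED_ACTION_FIFO_SIZE.
 */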

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err == -EINPROGRESS)
				return 0;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}
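
/* Editorial sketch of the nesting protocol: exec_actions_level counts
 * per-CPU nesting, and only the outermost call (level == 0 on entry)
 * drains the deferred work:
 *
 *	ovs_execute_actions()		// level 0
 *	  do_execute_actions()		// queues a recirc, returns
 *	  process_deferred_actions()	// runs it; may itself nest
 */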

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}