/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

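/* Per-CPU scratch state used to save a packet's L2 header and metadata
 * before the packet is handed to the IP stack for fragmentation; see
 * prepare_frag() and ovs_vport_output() below.
 */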
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

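/* Actions that cannot run in the current context (recirculation, nested
 * sample actions) are queued on this per-CPU FIFO and executed once the
 * outermost ovs_execute_actions() call unwinds, bounding stack depth.
 */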
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the new FIFO entry, or NULL if the FIFO is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

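/* Actions below that rewrite fields cached in the flow key call
 * invalidate_flow_key(); a zero Ethernet type marks the key stale so it
 * is re-extracted (see ovs_flow_key_update() in execute_recirc()) before
 * being relied upon again.
 */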
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
268 269
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);
	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

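	/* Non-first fragments carry no L4 header, so there is no
	 * transport checksum to update.
	 */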
	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
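			/* With a routing extension header present, the L4
			 * checksum is computed against the final destination
			 * in that header rather than nh->daddr, so skip the
			 * checksum update in that case.
			 */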
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
	new_csum = sctp_compute_cksum(skb, sctphoff);
	/* Carry any checksum errors through: XOR-ing out the old correct
	 * checksum and XOR-ing in the new one preserves any pre-existing
	 * checksum error across the rewrite.
	 */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;
	return 0;
}

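/* Output callback handed to the IP fragmentation code; invoked once per
 * fragment to restore the saved L2 header and transmit on the vport.
 */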
static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per output fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
			 __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		return;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			kfree_skb(skb);
			return;
		}

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		kfree_skb(skb);
	}
}

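/* Transmit 'skb' on 'out_port', fragmenting it first if it exceeds the
 * MRU recorded in its control block and refragmentation still fits the
 * egress device MTU; otherwise the packet is dropped.
 */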
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

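/* Build a dp_upcall_info from the OVS_ACTION_ATTR_USERSPACE sub-attributes
 * and pass the packet to userspace via ovs_dp_upcall().
 */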
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ip_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

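/* Execute the nested action list of a sample action with the configured
 * probability, by comparing prandom_u32() against the 32-bit threshold
 * installed by userspace.
 */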
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;
	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * output_userspace() sends a copy of the skb to user space;
	 * the skb itself is still consumed by our caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

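/* Hash the packet with skb_get_hash(), mix in the action's basis, and
 * record the (never-zero) result in the flow key.
 */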
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);

		/* FIXME: Remove when all vports have been converted */
		OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;

		return 0;
	}

	return -EINVAL;
}

/* A masked-set attribute carries the value followed by an equal-length
 * mask, so the mask starts at the midpoint of the data.
 */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
		err = -EINVAL;
		break;
	}

	return err;
}

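/* Queue the packet for another pass through the flow table under a new
 * recirc_id; the lookup itself happens later, from
 * process_deferred_actions(), via ovs_dp_process_packet().
 */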
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));
	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action in the list,
		 * so the skb must be cloned.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err == -EINPROGRESS)
				return 0;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

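/* Drain the per-CPU deferred-action FIFO: entries with an action list are
 * run through do_execute_actions(), entries without one are recirculated
 * packets that go back through flow lookup.
 */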
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'.  The per-CPU
 * exec_actions_level counter tracks nesting so deferred actions are only
 * drained once the outermost invocation on this CPU returns.
 */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}