/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

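/* Per-CPU scratch area that preserves a packet's L2 header and metadata
 * across IP fragmentation: prepare_frag() saves them here and
 * ovs_vport_output() restores them onto each resulting fragment.
 */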
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
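/* Recirculation at a nesting level at or below the threshold is executed
 * inline using a per-CPU key; deeper levels fall back to the deferred-action
 * FIFO, and OVS_RECURSION_LIMIT bounds the total nesting depth.
 */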
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

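/* Per-CPU flow keys for the recirculation levels that are executed inline. */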
struct recirc_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return a fifo entry if the fifo is not full, otherwise NULL. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

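/* Rewrite the Ethernet type field, updating a CHECKSUM_COMPLETE checksum
 * to account for the change.
 */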
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					~skb->csum);
	}

	hdr->h_proto = ethertype;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* mpls_hdr() is used to locate the ethertype field correctly in the
	 * presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

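/* Output callback for IP fragmentation: called once per fragment, it
 * restores the saved L2 header and skb metadata before handing the
 * fragment to the vport.
 */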
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

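/* Minimal dst_ops for the dummy dst/rt6_info that ovs_fragment() attaches
 * to the skb while calling into the IP fragmentation code; only ->mtu is
 * required.
 */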
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			goto err;
		}

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

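/* Send 'skb' out 'out_port', trimming it by any pending cutlen and
 * fragmenting it first when it exceeds the packet's MRU.
 */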
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of the sample action is having a single
	 * user-space action, or a truncate action followed by a single
	 * user-space action. Treat this usage as a special case.
	 * output_userspace() should clone the skb to be sent to
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

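/* Compute an L4 flow hash for OVS_ACTION_ATTR_HASH and store it in the
 * flow key's ovs_flow_hash field.
 */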
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int level;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * in the action list, so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	level = this_cpu_read(exec_actions_level);
	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
		struct sw_flow_key *recirc_key = &rks->key[level - 1];

		*recirc_key = *key;
		recirc_key->recirc_id = nla_get_u32(a);
		ovs_dp_process_packet(skb, recirc_key);

		return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff is wasteful.  The following code is
	 * slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
						     len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	recirc_keys = alloc_percpu(struct recirc_keys);
	if (!recirc_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(recirc_keys);
}