// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 * 		IPv6 support
 * 	Kazunori MIYAZAWA @USAGI
 * 	YOSHIFUJI Hideaki
 * 		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node; the list may be empty */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

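/* Route lookup for a state: if the transform type uses co-addresses
 * (XFRM_TYPE_LOCAL_COADDR / XFRM_TYPE_REMOTE_COADDR), substitute x->coaddr
 * for the lookup and copy the addresses actually used back into
 * prev_saddr/prev_daddr on success.
 */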
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

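/* Per-policy lifetime timer: handles soft and hard add/use expiry,
 * notifies key managers via km_policy_expired() and deletes the policy
 * once a hard limit is reached.
 */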
static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must already have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already have been unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

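/* Return the exact-match hash chain for this selector, or NULL when the
 * selector prefixes are shorter than the per-direction hash thresholds
 * (such policies live on the inexact lists/trees instead).
 */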
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

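/* Move every policy on one old bydst chain into the new hash table,
 * preserving the relative order of entries that hash to the same new
 * bucket.
 */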
static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

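/* Compare two addresses restricted to the first @prefixlen bits; returns
 * <0, 0 or >0 so callers can order rbtree nodes by masked address.
 */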
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}

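/* Re-add all policies flagged with ->bydst_reinsert to this node's hlist,
 * ordered by priority and, for equal priorities, by original position.
 */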
static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

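/* Insert @n into the rbtree @new, merging its policies with any existing
 * node that matches the same (possibly shortened) prefix.
 */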
static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

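/* Find or allocate the tree node for @addr/@prefixlen in @root. Existing
 * nodes that become subnets of the new prefix are removed and merged into
 * the new node.
 */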
static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

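/* Rebuild the bydst hash tables and inexact trees after the selector
 * prefix-length thresholds have been changed; policies are re-inserted
 * in their original creation order.
 */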
static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

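/* Word-by-word comparison of two selectors; returns 0 only when they are
 * identical.
 */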
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

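/* Move packets queued on the old policy's hold queue over to the new
 * policy and re-arm its hold timer so they get another chance to be
 * processed once the required states resolve.
 */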
static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

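/* Insert @policy into @chain ordered by priority. Returns the policy it
 * replaces (same type/if_id/selector/mark/context), ERR_PTR(-EEXIST) if
 * @excl is set and such a policy already exists, or NULL.
 */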
static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

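/* Unlink and destroy every policy of the given type, dropping the policy
 * lock while each one is killed.
 */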
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
	return ret;
}

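/* Lockless lookup of the inexact-policy node covering @addr.
 *
 * The prefix-sorted rbtree is walked under RCU; @count is a seqcount
 * taken by writers, so a concurrent tree rebalance simply forces the
 * search to start over.
 */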
static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_spinlock_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}

2013 2014 2015 2016
static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
			      struct xfrm_policy *prefer,
			      const struct flowi *fl,
H
Hongbin Wang 已提交
2017
			      u8 type, u16 family, u32 if_id)
2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030
{
	u32 priority = prefer ? prefer->priority : ~0u;
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry_rcu(pol, chain, bydst) {
		int err;

		if (pol->priority > priority)
			break;

H
Hongbin Wang 已提交
2031
		err = xfrm_policy_match(pol, fl, type, family, if_id);
2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055
		if (err) {
			if (err != -ESRCH)
				return ERR_PTR(err);

			continue;
		}

		if (prefer) {
			/* matches.  Is it older than *prefer? */
			if (pol->priority == priority &&
			    prefer->pos < pol->pos)
				return prefer;
		}

		return pol;
	}

	return NULL;
}

static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
			    struct xfrm_policy *prefer,
			    const struct flowi *fl,
H
Hongbin Wang 已提交
2056
			    u8 type, u16 family, u32 if_id)
2057 2058 2059 2060 2061 2062 2063
{
	struct xfrm_policy *tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
		tmp = __xfrm_policy_eval_candidates(cand->res[i],
						    prefer,
H
Hongbin Wang 已提交
2064
						    fl, type, family, if_id);
2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075
		if (!tmp)
			continue;

		if (IS_ERR(tmp))
			return tmp;
		prefer = tmp;
	}

	return prefer;
}

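/* Core policy lookup for one policy type.
 *
 * Runs under RCU: the exact hash chain for (daddr, saddr) is scanned
 * first, then the inexact candidate lists, keeping the best-priority
 * match.  The whole lookup is retried if the hash table is resized
 * concurrently or the matched policy cannot be refcounted in time.
 */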
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2077
						     const struct flowi *fl,
2078 2079
						     u16 family, u8 dir,
						     u32 if_id)
2080
{
2081
	struct xfrm_pol_inexact_candidates cand;
2082
	const xfrm_address_t *daddr, *saddr;
2083 2084
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *pol, *ret;
2085
	struct hlist_head *chain;
2086
	unsigned int sequence;
2087
	int err;
2088

2089 2090 2091 2092 2093
	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

2094
	rcu_read_lock();
2095 2096
 retry:
	do {
2097
		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2098
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2099
	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2100

2101
	ret = NULL;
2102
	hlist_for_each_entry_rcu(pol, chain, bydst) {
H
Hongbin Wang 已提交
2103
		err = xfrm_policy_match(pol, fl, type, family, if_id);
2104 2105 2106 2107 2108 2109 2110 2111
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
2112 2113 2114 2115
			ret = pol;
			break;
		}
	}
2116
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2117 2118
	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
							 daddr))
2119
		goto skip_inexact;
2120

2121
	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
H
Hongbin Wang 已提交
2122
					  family, if_id);
2123 2124 2125 2126
	if (pol) {
		ret = pol;
		if (IS_ERR(pol))
			goto fail;
L
Linus Torvalds 已提交
2127
	}
2128

2129
skip_inexact:
2130
	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2131 2132
		goto retry;

2133
	if (ret && !xfrm_pol_hold_rcu(ret))
2134
		goto retry;
2135
fail:
2136
	rcu_read_unlock();
2137

2138
	return ret;
2139 2140
}

2141 2142 2143
static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
					      const struct flowi *fl,
					      u16 family, u8 dir, u32 if_id)
2144 2145 2146 2147
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

2148 2149
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
					dir, if_id);
2150 2151 2152
	if (pol != NULL)
		return pol;
#endif
2153 2154
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
					 dir, if_id);
2155 2156
}

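/* Per-socket policy lookup, called under RCU.
 *
 * The cached sk->sk_policy[dir] entry is checked against the flow's
 * family, selector, mark and if_id plus the LSM secid; the reference
 * is taken with xfrm_pol_hold_rcu(), restarting if that fails.
 */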
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2158 2159
						 const struct flowi *fl,
						 u16 family, u32 if_id)
L
Linus Torvalds 已提交
2160 2161 2162
{
	struct xfrm_policy *pol;

2163
	rcu_read_lock();
2164
 again:
2165 2166
	pol = rcu_dereference(sk->sk_policy[dir]);
	if (pol != NULL) {
2167
		bool match;
2168
		int err = 0;
2169

2170 2171 2172 2173 2174 2175
		if (pol->family != family) {
			pol = NULL;
			goto out;
		}

		match = xfrm_selector_match(&pol->selector, fl, family);
2176
		if (match) {
2177
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2178
			    pol->if_id != if_id) {
J
Jamal Hadi Salim 已提交
2179 2180 2181
				pol = NULL;
				goto out;
			}
2182
			err = security_xfrm_policy_lookup(pol->security,
2183
						      fl->flowi_secid);
2184 2185 2186 2187
			if (!err) {
				if (!xfrm_pol_hold_rcu(pol))
					goto again;
			} else if (err == -ESRCH) {
2188
				pol = NULL;
2189
			} else {
2190
				pol = ERR_PTR(err);
2191
			}
2192
		} else
L
Linus Torvalds 已提交
2193 2194
			pol = NULL;
	}
J
Jamal Hadi Salim 已提交
2195
out:
2196
	rcu_read_unlock();
L
Linus Torvalds 已提交
2197 2198 2199 2200 2201
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
2202
	struct net *net = xp_net(pol);
2203

2204 2205
	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
L
Linus Torvalds 已提交
2206 2207 2208 2209 2210 2211
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
2212 2213
	struct net *net = xp_net(pol);

H
Herbert Xu 已提交
2214
	if (list_empty(&pol->walk.all))
2215
		return NULL;
L
Linus Torvalds 已提交
2216

H
Herbert Xu 已提交
2217 2218
	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
2219
		hlist_del_rcu(&pol->bydst);
2220
		hlist_del_init(&pol->bydst_inexact_list);
H
Herbert Xu 已提交
2221 2222 2223 2224
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
2225
	net->xfrm.policy_count[dir]--;
2226 2227

	return pol;
L
Linus Torvalds 已提交
2228 2229
}

H
Herbert Xu 已提交
2230 2231 2232 2233 2234 2235 2236 2237 2238 2239
static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

2240
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
L
Linus Torvalds 已提交
2241
{
F
Fan Du 已提交
2242 2243
	struct net *net = xp_net(pol);

2244
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
L
Linus Torvalds 已提交
2245
	pol = __xfrm_policy_unlink(pol, dir);
2246
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
L
Linus Torvalds 已提交
2247 2248
	if (pol) {
		xfrm_policy_kill(pol);
2249
		return 0;
L
Linus Torvalds 已提交
2250
	}
2251
	return -ENOENT;
L
Linus Torvalds 已提交
2252
}
2253
EXPORT_SYMBOL(xfrm_policy_delete);
L
Linus Torvalds 已提交
2254 2255 2256

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
2257
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
2258 2259
	struct xfrm_policy *old_pol;

2260 2261 2262 2263 2264
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

2265
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2266 2267
	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
L
Linus Torvalds 已提交
2268
	if (pol) {
2269
		pol->curlft.add_time = ktime_get_real_seconds();
2270
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
H
Herbert Xu 已提交
2271
		xfrm_sk_policy_link(pol, dir);
L
Linus Torvalds 已提交
2272
	}
2273
	rcu_assign_pointer(sk->sk_policy[dir], pol);
2274 2275 2276 2277
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

2278 2279 2280
		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
H
Herbert Xu 已提交
2281
		xfrm_sk_policy_unlink(old_pol, dir);
2282
	}
2283
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
L
Linus Torvalds 已提交
2284 2285 2286 2287 2288 2289 2290

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

2291
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
L
Linus Torvalds 已提交
2292
{
2293
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
F
Fan Du 已提交
2294
	struct net *net = xp_net(old);
L
Linus Torvalds 已提交
2295 2296 2297

	if (newp) {
		newp->selector = old->selector;
2298 2299
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
2300 2301 2302
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
L
Linus Torvalds 已提交
2303 2304
		newp->lft = old->lft;
		newp->curlft = old->curlft;
2305
		newp->mark = old->mark;
2306
		newp->if_id = old->if_id;
L
Linus Torvalds 已提交
2307 2308 2309 2310
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
2311
		newp->type = old->type;
2312
		newp->family = old->family;
L
Linus Torvalds 已提交
2313 2314
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2315
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
H
Herbert Xu 已提交
2316
		xfrm_sk_policy_link(newp, dir);
2317
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
L
Linus Torvalds 已提交
2318 2319 2320 2321 2322
		xfrm_pol_put(newp);
	}
	return newp;
}

2323
int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
L
Linus Torvalds 已提交
2324
{
2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
L
Linus Torvalds 已提交
2343 2344
}

2345
static int
D
David Ahern 已提交
2346
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2347
	       xfrm_address_t *remote, unsigned short family, u32 mark)
2348 2349
{
	int err;
2350
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2351 2352 2353

	if (unlikely(afinfo == NULL))
		return -EINVAL;
2354
	err = afinfo->get_saddr(net, oif, local, remote, mark);
2355
	rcu_read_unlock();
2356 2357 2358
	return err;
}

L
Linus Torvalds 已提交
2359 2360 2361
/* Resolve list of templates for the flow, given policy. */

static int
2362 2363
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
L
Linus Torvalds 已提交
2364
{
A
Alexey Dobriyan 已提交
2365
	struct net *net = xp_net(policy);
L
Linus Torvalds 已提交
2366 2367
	int nx;
	int i, error;
2368 2369
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2370
	xfrm_address_t tmp;
L
Linus Torvalds 已提交
2371

2372
	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
L
Linus Torvalds 已提交
2373
		struct xfrm_state *x;
2374 2375
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
L
Linus Torvalds 已提交
2376 2377
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389
		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, fl->flowi_oif,
						       &tmp, remote,
						       tmpl->encap_family, 0);
				if (error)
					goto fail;
				local = &tmp;
			}
L
Linus Torvalds 已提交
2390 2391
		}

2392 2393
		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
				    family, policy->if_id);
L
Linus Torvalds 已提交
2394 2395 2396

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
2397 2398
			daddr = remote;
			saddr = local;
L
Linus Torvalds 已提交
2399 2400 2401 2402 2403 2404
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
W
Weilong Chen 已提交
2405
		} else if (error == -ESRCH) {
2406
			error = -EAGAIN;
W
Weilong Chen 已提交
2407
		}
L
Linus Torvalds 已提交
2408 2409 2410 2411 2412 2413 2414

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
2415
	for (nx--; nx >= 0; nx--)
L
Linus Torvalds 已提交
2416 2417 2418 2419
		xfrm_state_put(xfrm[nx]);
	return error;
}

2420
static int
2421 2422
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
2423
{
2424 2425
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2426 2427 2428 2429 2430 2431 2432 2433 2434 2435
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}
2436 2437

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2438 2439 2440 2441 2442 2443 2444
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

2445 2446 2447 2448
	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

2449 2450 2451
	return cnx;

 fail:
2452
	for (cnx--; cnx >= 0; cnx--)
2453
		xfrm_state_put(tpp[cnx]);
2454 2455 2456 2457
	return error;

}

2458
static int xfrm_get_tos(const struct flowi *fl, int family)
2459
{
2460 2461
	if (family == AF_INET)
		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2462

2463
	return 0;
2464 2465
}

2466
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
L
Linus Torvalds 已提交
2467
{
2468
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2469
	struct dst_ops *dst_ops;
2470 2471 2472 2473 2474
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

2475 2476 2477 2478
	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
E
Eric Dumazet 已提交
2479
#if IS_ENABLED(CONFIG_IPV6)
2480 2481 2482 2483 2484 2485 2486
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
W
Wei Wang 已提交
2487
	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2488

2489
	if (likely(xdst)) {
2490
		memset_after(xdst, 0, u.dst);
2491
	} else
2492
		xdst = ERR_PTR(-ENOBUFS);
2493

2494
	rcu_read_unlock();
2495

2496 2497 2498
	return xdst;
}

2499 2500
static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
2501
{
2502 2503 2504 2505 2506
	if (dst->ops->family == AF_INET6) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		path->path_cookie = rt6_get_cookie(rt);
		path->u.rt6.rt6i_nfheader_len = nfheader_len;
	}
2507 2508
}

H
Herbert Xu 已提交
2509
static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2510
				const struct flowi *fl)
2511
{
2512
	const struct xfrm_policy_afinfo *afinfo =
2513 2514 2515 2516
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
L
Linus Torvalds 已提交
2517
		return -EINVAL;
2518

H
Herbert Xu 已提交
2519
	err = afinfo->fill_dst(xdst, dev, fl);
2520

2521
	rcu_read_unlock();
2522

L
Linus Torvalds 已提交
2523 2524 2525
	return err;
}

2526

2527 2528 2529 2530 2531
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2532 2533 2534
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
2535
					    const struct flowi *fl,
2536 2537
					    struct dst_entry *dst)
{
2538
	const struct xfrm_state_afinfo *afinfo;
F
Florian Westphal 已提交
2539
	const struct xfrm_mode *inner_mode;
2540
	struct net *net = xp_net(policy);
2541 2542
	unsigned long now = jiffies;
	struct net_device *dev;
2543 2544
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;
2545 2546 2547
	int i = 0;
	int err;
	int header_len = 0;
2548
	int nfheader_len = 0;
2549 2550 2551
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
2552 2553 2554
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2555 2556 2557 2558 2559 2560

	tos = xfrm_get_tos(fl, family);

	dst_hold(dst);

	for (; i < nx; i++) {
2561
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2562 2563 2564 2565 2566 2567 2568 2569
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

2570
		bundle[i] = xdst;
2571 2572
		if (!xdst_prev)
			xdst0 = xdst;
2573 2574 2575 2576
		else
			/* Ref count is taken during xfrm_alloc_dst()
			 * No need to do dst_clone() on dst1
			 */
2577
			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2578

2579 2580 2581 2582 2583 2584 2585 2586 2587
		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
2588
			inner_mode = &xfrm[i]->inner_mode;
2589

2590
		xdst->route = dst;
2591
		dst_copy_metrics(dst1, dst);
2592 2593

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2594
			__u32 mark = 0;
2595
			int oif;
2596 2597 2598

			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2599

2600
			family = xfrm[i]->props.family;
2601 2602
			oif = fl->flowi_oif ? : fl->flowi_l3mdev;
			dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2603
					      &saddr, &daddr, family, mark);
2604 2605 2606 2607 2608 2609 2610
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
2611
		xdst->xfrm_genid = xfrm[i]->genid;
2612

2613
		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2614 2615 2616
		dst1->lastuse = now;

		dst1->input = dst_discard;
2617 2618 2619 2620 2621 2622 2623 2624

		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
		if (likely(afinfo))
			dst1->output = afinfo->output;
		else
			dst1->output = dst_discard_out;
		rcu_read_unlock();
2625

2626
		xdst_prev = xdst;
2627 2628

		header_len += xfrm[i]->props.header_len;
2629 2630
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
2631 2632 2633
		trailer_len += xfrm[i]->props.trailer_len;
	}

2634
	xfrm_dst_set_child(xdst_prev, dst);
2635
	xdst0->path = dst;
2636 2637 2638 2639 2640 2641

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

2642
	xfrm_init_path(xdst0, dst, nfheader_len);
2643
	xfrm_init_pmtu(bundle, nx);
2644

2645 2646 2647
	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);
2648 2649 2650
		if (err)
			goto free_dst;

2651 2652 2653 2654
		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2655 2656
	}

2657
	return &xdst0->u.dst;
2658 2659 2660 2661 2662

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
2663 2664
	if (xdst0)
		dst_release_immediate(&xdst0->u.dst);
2665 2666

	return ERR_PTR(err);
2667 2668
}

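/* Expand the main/sub policy pair that applies to this flow.
 *
 * On return *num_pols and *num_xfrms describe how many policies were
 * found and how many templates they contribute; a policy whose action
 * is not XFRM_POLICY_ALLOW forces *num_xfrms to -1 so the caller can
 * reject the flow.
 */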
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2670 2671 2672 2673 2674 2675 2676 2677 2678 2679
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
2680 2681
	if (IS_ERR(pols[0])) {
		*num_pols = 0;
2682
		return PTR_ERR(pols[0]);
2683
	}
2684 2685 2686 2687

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
2688
	if (pols[0]->action == XFRM_POLICY_ALLOW &&
2689 2690 2691 2692
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
2693 2694
						    XFRM_POLICY_OUT,
						    pols[0]->if_id);
2695 2696 2697
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
2698
				*num_pols = 0;
2699 2700
				return PTR_ERR(pols[1]);
			}
2701
			(*num_pols)++;
2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;

}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2719
			       const struct flowi *fl, u16 family,
2720 2721 2722 2723
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2724
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2725
	struct xfrm_dst *xdst;
2726 2727 2728
	struct dst_entry *dst;
	int err;

2729 2730 2731
	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
2732 2733 2734 2735
		if (err == 0)
			return NULL;

		if (err != -EAGAIN)
2736 2737 2738 2739
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

2740
	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2741 2742 2743 2744 2745 2746 2747 2748
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
2749
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2750 2751 2752 2753 2754
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}

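/* Timer handler for a policy's hold queue.
 *
 * Packets queued while SAs were being negotiated are looked up again;
 * if the result is still a queueing dummy bundle the timeout is
 * doubled and the timer re-armed (until XFRM_QUEUE_TMO_MAX, at which
 * point the queue is purged), otherwise the queue is drained and each
 * skb is sent through dst_output().
 */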
static void xfrm_policy_queue_process(struct timer_list *t)
2756 2757 2758 2759
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
2760
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2761
	struct net *net = xp_net(pol);
2762 2763 2764
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;
2765
	__u32 skb_mark;
2766 2767 2768

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
2769 2770 2771 2772
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
2773 2774
	dst = skb_dst(skb);
	sk = skb->sk;
2775 2776 2777 2778

	/* Fixup the mark to support VTI. */
	skb_mark = skb->mark;
	skb->mark = pol->mark.v;
2779
	xfrm_decode_session(skb, &fl, dst->ops->family);
2780
	skb->mark = skb_mark;
2781 2782
	spin_unlock(&pq->hold_queue.lock);

2783
	dst_hold(xfrm_dst_path(dst));
2784
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2785 2786 2787 2788 2789 2790 2791 2792 2793 2794
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
2795 2796
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
2797
		goto out;
2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

2812 2813 2814
		/* Fixup the mark to support VTI. */
		skb_mark = skb->mark;
		skb->mark = pol->mark.v;
2815
		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2816 2817
		skb->mark = skb_mark;

2818 2819
		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2820 2821 2822 2823 2824
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

2825
		nf_reset_ct(skb);
2826 2827 2828
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

2829
		dst_output(net, skb->sk, skb);
2830 2831
	}

2832 2833
out:
	xfrm_pol_put(pol);
2834 2835 2836 2837
	return;

purge_queue:
	pq->timeout = 0;
2838
	skb_queue_purge(&pq->hold_queue);
2839
	xfrm_pol_put(pol);
2840 2841
}

E
Eric W. Biederman 已提交
2842
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2843 2844 2845 2846
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2847 2848
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;
2849

2850
	if (unlikely(skb_fclone_busy(sk, skb))) {
2851 2852 2853
		kfree_skb(skb);
		return 0;
	}
2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
2872
		xfrm_pol_put(pol);
2873 2874 2875
	}

	__skb_queue_tail(&pq->hold_queue, skb);
2876 2877
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);
2878 2879 2880 2881 2882 2883 2884

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}

static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2885
						 struct xfrm_flo *xflo,
2886 2887 2888 2889 2890 2891
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
2892
	struct dst_entry *dst;
2893 2894 2895 2896 2897 2898 2899
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

2900 2901 2902
	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
2903 2904
		return xdst;

2905
	dst = xflo->dst_orig;
2906 2907 2908 2909 2910 2911 2912
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
D
David Laight 已提交
2913
	dst1->flags |= DST_XFRM_QUEUE;
2914 2915 2916 2917 2918 2919
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
2920
	xfrm_dst_set_child(xdst, dst);
2921
	xdst->path = dst;
2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}

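/* Resolve the policies for this flow and build a bundle for them.
 *
 * Returns NULL when no policy applies, a dummy bundle (used for
 * packet queueing) when the templates cannot be resolved yet, or an
 * ERR_PTR() on failure.
 */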
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					   const struct flowi *fl,
					   u16 family, u8 dir,
					   struct xfrm_flo *xflo, u32 if_id)
2947 2948
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2949
	int num_pols = 0, num_xfrms = 0, err;
2950
	struct xfrm_dst *xdst;
2951 2952 2953

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
2954
	num_pols = 1;
2955
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2956
	err = xfrm_expand_policies(fl, family, pols,
2957
					   &num_pols, &num_xfrms);
2958 2959 2960 2961 2962 2963
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;
2964

2965
	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2966
					      xflo->dst_orig);
2967 2968
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
2969 2970 2971 2972 2973
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

2974 2975
		if (err != -EAGAIN)
			goto error;
2976
		goto make_dummy_bundle;
2977
	} else if (xdst == NULL) {
2978
		num_xfrms = 0;
2979
		goto make_dummy_bundle;
2980 2981
	}

2982
	return xdst;
2983 2984 2985 2986 2987

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
2988
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2989 2990 2991 2992 2993 2994
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
2995
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2996

2997
	return xdst;
2998 2999 3000 3001

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
3002
	xfrm_pols_put(pols, num_pols);
3003 3004
	return ERR_PTR(err);
}
L
Linus Torvalds 已提交
3005

3006 3007 3008
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
3009
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3010 3011 3012 3013
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
3014
		return ERR_PTR(-EINVAL);
3015 3016 3017
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
3018
	rcu_read_unlock();
3019 3020 3021 3022

	return ret;
}

3023
/* Finds/creates a bundle for given flow and if_id
L
Linus Torvalds 已提交
3024 3025 3026
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
3027 3028 3029
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility
L
Linus Torvalds 已提交
3030
 */
3031 3032 3033 3034 3035
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
L
Linus Torvalds 已提交
3036
{
3037
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3038
	struct xfrm_dst *xdst;
3039
	struct dst_entry *dst, *route;
3040
	u16 family = dst_orig->ops->family;
3041
	u8 dir = XFRM_POLICY_OUT;
3042
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3043

3044 3045 3046
	dst = NULL;
	xdst = NULL;
	route = NULL;
3047

E
Eric Dumazet 已提交
3048
	sk = sk_const_to_full_sk(sk);
3049
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3050
		num_pols = 1;
3051 3052
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
3053 3054 3055
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
3056
			goto dropdst;
3057 3058 3059 3060 3061 3062 3063 3064 3065 3066

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
3067

3068 3069 3070
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
3071 3072 3073
				if (err == -EREMOTE)
					goto nopol;

3074
				goto dropdst;
3075 3076 3077 3078
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
3079 3080 3081
			}

			route = xdst->route;
3082
		}
3083
	}
L
Linus Torvalds 已提交
3084

3085
	if (xdst == NULL) {
3086 3087 3088 3089 3090
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

L
Linus Torvalds 已提交
3091
		/* To accelerate a bit...  */
3092 3093
		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3094
			goto nopol;
L
Linus Torvalds 已提交
3095

3096
		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3097
		if (xdst == NULL)
3098
			goto nopol;
3099 3100
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
3101
			goto dropdst;
3102
		}
3103 3104 3105

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
3106
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3120 3121
			err = -EREMOTE;
			goto error;
3122 3123
		}

3124
		err = -EAGAIN;
3125 3126 3127

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
L
Linus Torvalds 已提交
3128 3129
	}

3130 3131
no_transform:
	if (num_pols == 0)
3132
		goto nopol;
L
Linus Torvalds 已提交
3133

3134 3135 3136
	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
3137
		goto error;
3138
	}
3139

3140
	for (i = 0; i < num_pols; i++)
3141
		pols[i]->curlft.use_time = ktime_get_real_seconds();
3142

3143
	if (num_xfrms < 0) {
L
Linus Torvalds 已提交
3144
		/* Prohibit the flow */
A
Alexey Dobriyan 已提交
3145
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3146 3147
		err = -EPERM;
		goto error;
3148 3149 3150 3151 3152 3153
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
3154
		dst = dst_orig;
L
Linus Torvalds 已提交
3155
	}
3156 3157
ok:
	xfrm_pols_put(pols, drop_pols);
G
Gao feng 已提交
3158 3159 3160
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
3161
	return dst;
L
Linus Torvalds 已提交
3162

3163
nopol:
3164
	if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3165
	    net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3166 3167 3168
		err = -EPERM;
		goto error;
	}
3169 3170
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
3171
		goto ok;
3172
	}
3173
	err = -ENOENT;
L
Linus Torvalds 已提交
3174
error:
3175
	dst_release(dst);
3176
dropdst:
3177 3178
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
3179
	xfrm_pols_put(pols, drop_pols);
3180
	return ERR_PTR(err);
L
Linus Torvalds 已提交
3181
}
3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194
EXPORT_SYMBOL(xfrm_lookup_with_ifid);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags)
{
	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
}
L
Linus Torvalds 已提交
3195 3196
EXPORT_SYMBOL(xfrm_lookup);

3197 3198 3199 3200 3201
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
3202
				    const struct sock *sk, int flags)
3203
{
3204
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3205 3206
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);
3207

3208
	if (PTR_ERR(dst) == -EREMOTE)
3209 3210
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

3211 3212 3213
	if (IS_ERR(dst))
		dst_release(dst_orig);

3214 3215 3216 3217
	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);

3218
static inline int
3219
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3220
{
3221
	struct sec_path *sp = skb_sec_path(skb);
3222 3223
	struct xfrm_state *x;

3224
	if (!sp || idx < 0 || idx >= sp->len)
3225
		return 0;
3226
	x = sp->xvec[idx];
3227 3228
	if (!x->type->reject)
		return 0;
3229
	return x->type->reject(x, skb, fl);
3230 3231
}

L
Linus Torvalds 已提交
3232 3233 3234 3235 3236 3237 3238
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
3239
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
L
Linus Torvalds 已提交
3240 3241 3242
	      unsigned short family)
{
	if (xfrm_state_kern(x))
3243
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
L
Linus Torvalds 已提交
3244 3245 3246 3247
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
3248
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3249
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3250 3251
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
L
Linus Torvalds 已提交
3252 3253
}

3254 3255
/*
 * 0 or more than 0 is returned when validation is succeeded (either bypass
3256
 * because of optional transport mode, or next index of the matched secpath
3257 3258 3259 3260
 * state with the template.
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
L
Linus Torvalds 已提交
3261
static inline int
3262
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
L
Linus Torvalds 已提交
3263 3264 3265 3266 3267
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
3268
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
L
Linus Torvalds 已提交
3269 3270 3271 3272
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
3273
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
L
Linus Torvalds 已提交
3274
			return ++idx;
3275 3276 3277
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
L
Linus Torvalds 已提交
3278
			break;
3279
		}
L
Linus Torvalds 已提交
3280 3281 3282 3283
	}
	return start;
}

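/* Build a flowi4 for policy lookup from an IPv4 packet, optionally
 * with source and destination swapped for reverse (inbound) lookups.
 * Port, ICMP and GRE-key selectors are only extracted from
 * non-fragmented packets.
 */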
static void
decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
3288 3289
	int ihl = iph->ihl;
	u8 *xprth = skb_network_header(skb) + ihl * 4;
3290 3291 3292
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

3293
	if (skb_dst(skb) && skb_dst(skb)->dev)
3294 3295 3296 3297 3298 3299
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

3300 3301 3302
	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3303
	fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
3304

3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315
	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

3316
				xprth = skb_network_header(skb) + ihl * 4;
3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

3328
				xprth = skb_network_header(skb) + ihl * 4;
3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

3341
				xprth = skb_network_header(skb) + ihl * 4;
3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			break;
		}
	}
}

#if IS_ENABLED(CONFIG_IPV6)
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	int onlyproto = 0;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u32 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

3377
	if (skb_dst(skb) && skb_dst(skb)->dev)
3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	while (nh + offset + sizeof(*exthdr) < skb->data ||
	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			onlyproto = 1;
3395
			fallthrough;
3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			break;
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;
		case IPPROTO_ICMPV6:
			if (!onlyproto && (nh + offset + 2 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;
3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449
		case IPPROTO_GRE:
			if (!onlyproto &&
			    (nh + offset + 12 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
				struct gre_base_hdr *gre_hdr;
				__be32 *gre_key;

				nh = skb_network_header(skb);
				gre_hdr = (struct gre_base_hdr *)(nh + offset);
				gre_key = (__be32 *)(gre_hdr + 1);

				if (gre_hdr->flags & GRE_KEY) {
					if (gre_hdr->flags & GRE_CSUM)
						gre_key++;
					fl6->fl6_gre_key = *gre_key;
				}
			}
			fl6->flowi6_proto = nexthdr;
			return;

3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && (nh + offset + 3 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif
		default:
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
#endif

3472 3473
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
L
Linus Torvalds 已提交
3474
{
3475 3476 3477 3478 3479 3480 3481 3482 3483 3484
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
L
Linus Torvalds 已提交
3485
		return -EAFNOSUPPORT;
3486
	}
L
Linus Torvalds 已提交
3487

3488
	return security_xfrm_decode_session(skb, &fl->flowi_secid);
L
Linus Torvalds 已提交
3489
}
3490
EXPORT_SYMBOL(__xfrm_decode_session);
L
Linus Torvalds 已提交
3491

3492
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
L
Linus Torvalds 已提交
3493 3494
{
	for (; k < sp->len; k++) {
3495
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3496
			*idxp = k;
L
Linus Torvalds 已提交
3497
			return 1;
3498
		}
L
Linus Torvalds 已提交
3499 3500 3501 3502 3503
	}

	return 0;
}

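/* Inbound policy check: verify that the states this packet actually
 * went through (its secpath) satisfy the templates of every
 * applicable policy (socket, sub and main), in order.
 */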
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
L
Linus Torvalds 已提交
3505 3506
			unsigned short family)
{
3507
	struct net *net = dev_net(skb->dev);
L
Linus Torvalds 已提交
3508
	struct xfrm_policy *pol;
3509 3510 3511 3512
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
3513
	int reverse;
L
Linus Torvalds 已提交
3514
	struct flowi fl;
3515
	int xerr_idx = -1;
3516
	const struct xfrm_if_cb *ifcb;
3517
	struct sec_path *sp;
3518 3519 3520 3521 3522 3523
	u32 if_id = 0;

	rcu_read_lock();
	ifcb = xfrm_if_get_cb();

	if (ifcb) {
3524 3525 3526 3527 3528
		struct xfrm_if_decode_session_result r;

		if (ifcb->decode_session(skb, family, &r)) {
			if_id = r.if_id;
			net = r.net;
3529
		}
3530 3531
	}
	rcu_read_unlock();
L
Linus Torvalds 已提交
3532

3533 3534 3535
	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

3536
	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
A
Alexey Dobriyan 已提交
3537
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
L
Linus Torvalds 已提交
3538
		return 0;
3539 3540
	}

3541
	nf_nat_decode_session(skb, &fl, family);
L
Linus Torvalds 已提交
3542 3543

	/* First, check used SA against their selectors. */
3544 3545
	sp = skb_sec_path(skb);
	if (sp) {
L
Linus Torvalds 已提交
3546 3547
		int i;

3548 3549
		for (i = sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = sp->xvec[i];
3550
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
A
Alexey Dobriyan 已提交
3551
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
L
Linus Torvalds 已提交
3552
				return 0;
3553
			}
L
Linus Torvalds 已提交
3554 3555 3556 3557
		}
	}

	pol = NULL;
E
Eric Dumazet 已提交
3558
	sk = sk_to_full_sk(sk);
3559
	if (sk && sk->sk_policy[dir]) {
3560
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3561
		if (IS_ERR(pol)) {
A
Alexey Dobriyan 已提交
3562
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3563
			return 0;
3564
		}
3565
	}
L
Linus Torvalds 已提交
3566

3567
	if (!pol)
3568
		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
L
Linus Torvalds 已提交
3569

3570
	if (IS_ERR(pol)) {
A
Alexey Dobriyan 已提交
3571
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3572
		return 0;
3573
	}
3574

3575
	if (!pol) {
3576
		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3577 3578 3579 3580
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}

3581
		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3582
			xfrm_secpath_reject(xerr_idx, skb, &fl);
A
Alexey Dobriyan 已提交
3583
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3584 3585 3586 3587
			return 0;
		}
		return 1;
	}
L
Linus Torvalds 已提交
3588

3589
	pol->curlft.use_time = ktime_get_real_seconds();
L
Linus Torvalds 已提交
3590

3591
	pols[0] = pol;
3592
	npols++;
3593 3594
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3595
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3596
						    &fl, family,
3597
						    XFRM_POLICY_IN, if_id);
3598
		if (pols[1]) {
3599
			if (IS_ERR(pols[1])) {
A
Alexey Dobriyan 已提交
3600
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3601
				xfrm_pol_put(pols[0]);
3602
				return 0;
3603
			}
3604
			pols[1]->curlft.use_time = ktime_get_real_seconds();
3605
			npols++;
3606 3607 3608 3609
		}
	}
#endif

L
Linus Torvalds 已提交
3610 3611
	if (pol->action == XFRM_POLICY_ALLOW) {
		static struct sec_path dummy;
3612
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3613
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3614 3615
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
L
Linus Torvalds 已提交
3616 3617
		int i, k;

3618 3619
		sp = skb_sec_path(skb);
		if (!sp)
L
Linus Torvalds 已提交
3620 3621
			sp = &dummy;

3622 3623
		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
3624
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
A
Alexey Dobriyan 已提交
3625
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3626
				goto reject;
3627 3628
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
A
Alexey Dobriyan 已提交
3629
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3630
				goto reject_error;
3631
			}
3632 3633 3634 3635
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
3636

3637 3638
		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
		    !xfrm_nr) {
3639 3640 3641 3642
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			goto reject;
		}

3643
		if (npols > 1) {
3644
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3645 3646
			tpp = stp;
		}
3647

L
Linus Torvalds 已提交
3648 3649 3650 3651 3652 3653
		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
3654 3655
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
3656
			if (k < 0) {
3657 3658 3659
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
A
Alexey Dobriyan 已提交
3660
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
L
Linus Torvalds 已提交
3661
				goto reject;
3662
			}
L
Linus Torvalds 已提交
3663 3664
		}

3665
		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
A
Alexey Dobriyan 已提交
3666
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
L
Linus Torvalds 已提交
3667
			goto reject;
3668
		}
L
Linus Torvalds 已提交
3669

3670
		xfrm_pols_put(pols, npols);
L
Linus Torvalds 已提交
3671 3672
		return 1;
	}
A
Alexey Dobriyan 已提交
3673
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
L
Linus Torvalds 已提交
3674 3675

reject:
3676
	xfrm_secpath_reject(xerr_idx, skb, &fl);
3677 3678
reject_error:
	xfrm_pols_put(pols, npols);
L
Linus Torvalds 已提交
3679 3680 3681 3682 3683 3684
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
3685
	struct net *net = dev_net(skb->dev);
L
Linus Torvalds 已提交
3686
	struct flowi fl;
E
Eric Dumazet 已提交
3687
	struct dst_entry *dst;
E
Eric Dumazet 已提交
3688
	int res = 1;
L
Linus Torvalds 已提交
3689

3690
	if (xfrm_decode_session(skb, &fl, family) < 0) {
3691
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
L
Linus Torvalds 已提交
3692
		return 0;
3693
	}
L
Linus Torvalds 已提交
3694

3695
	skb_dst_force(skb);
3696 3697 3698 3699
	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}
E
Eric Dumazet 已提交
3700

3701
	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3702
	if (IS_ERR(dst)) {
E
Eric Dumazet 已提交
3703
		res = 0;
3704 3705
		dst = NULL;
	}
E
Eric Dumazet 已提交
3706 3707
	skb_dst_set(skb, dst);
	return res;
L
Linus Torvalds 已提交
3708 3709 3710
}
EXPORT_SYMBOL(__xfrm_route_forward);

3711 3712
/* Optimize later using cookies and generation ids. */

L
Linus Torvalds 已提交
3713 3714
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

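/* Detach a departing device from XFRM bundles: any child dst that still
 * references @dev is pointed at blackhole_netdev instead, and the
 * reference on @dev is dropped.
 */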
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = blackhole_netdev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

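/* Seed the cached child and route MTUs for every xfrm_dst of a freshly
 * built bundle, and set each dst's RTAX_MTU to the smaller of the
 * state-adjusted child MTU and the route MTU.
 */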
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
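 *
 * A bundle is considered stale when one of its states is no longer
 * XFRM_STATE_VALID, when a state or policy genid has changed, or when
 * dst_check() fails on a cached path or route; the cached child and
 * route MTUs are refreshed while the chain is walked.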
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}

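/* Walk an XFRM bundle towards the path dst and return the address that
 * neighbour resolution should use: for non-transport-mode states the
 * tunnel endpoint (id.daddr) or remote care-of address typically
 * replaces the original destination.
 */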
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}

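/* Register the per-family policy afinfo and fill in any dst_ops hooks
 * the address family left unset with the generic xfrm implementations
 * defined above.
 */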
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);

#ifdef CONFIG_XFRM_STATISTICS
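/* Per-namespace XFRM MIB counters: allocate the percpu statistics block
 * and expose it via xfrm_proc_init(); both helpers collapse to no-ops
 * when CONFIG_XFRM_STATISTICS is disabled.
 */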
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

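/* Per-namespace policy setup: the global xfrm_dst kmem cache and the
 * inexact-policy rhashtable are created once for init_net; every
 * namespace then gets its by-index and per-direction by-destination
 * hash tables plus the hash resize/rebuild work items.
 */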
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

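/* pernet init/exit: initialize the per-namespace locks, default all
 * three policy directions to accept, and bring up statistics, state,
 * policy and sysctl support in order, unwinding in reverse on failure.
 */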
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
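/* Append the policy's security context and selector (addresses plus any
 * non-default prefix lengths) to an audit record; shared by the SPD-add
 * and SPD-delete events below.
 */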
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
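/* Migration support: xfrm_migrate() below rewrites the endpoint
 * addresses of an existing policy and its states in place.  The selector
 * match treats IPSEC_ULPROTO_ANY as a wildcard for the upper-layer
 * protocol and otherwise requires an exact selector match.
 */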
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net, u32 if_id)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

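/* Migrate the policy matching @sel/@dir/@type (and @if_id) and its
 * states to the new addresses in @m: validate the request, find the
 * policy, create replacement states with xfrm_state_migrate(), rewrite
 * the policy templates, delete the old states and announce the change
 * via km_migrate().
 */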
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif