/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

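/* Return true when @rule carries no match criteria at all, i.e. it would
 * match every packet.
 */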
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	if (fib_rule_port_range_set(&rule->sport_range))
		return false;
	if (fib_rule_port_range_set(&rule->dport_range))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->proto = RTPROT_KERNEL;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

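/* Pick a priority for a rule added without FRA_PRIORITY: one below the
 * preference of the second rule in the list, or 0 if that is not possible.
 */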
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

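/* Find the fib_rules_ops registered for @family and take a reference on its
 * owning module; the caller must drop it again with rules_ops_put().
 */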
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

static int nla_get_port_range(struct nlattr *pattr,
			      struct fib_rule_port_range *port_range)
{
	const struct fib_rule_port_range *pr = nla_data(pattr);

	if (!fib_rule_port_range_valid(pr))
		return -EINVAL;

	port_range->start = pr->start;
	port_range->end = pr->end;

	return 0;
}

static int nla_put_port_range(struct sk_buff *skb, int attrtype,
			      struct fib_rule_port_range *range)
{
	return nla_put(skb, attrtype, sizeof(*range), range);
}

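/* Return 1 if @rule matches the flow @fl, 0 otherwise; the result is
 * inverted when FIB_RULE_INVERT is set on the rule.
 */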
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

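/* Walk the rules in priority order, following resolved goto targets.  The
 * first matching rule whose action does not return -EAGAIN (and is not
 * vetoed by ops->suppress) terminates the walk: arg->rule is set and the
 * action's return value is propagated.  -ESRCH means no rule applied.
 */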
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
				  enum fib_event_type event_type,
				  struct fib_rule *rule, int family)
{
	struct fib_rule_notifier_info info = {
		.info.family = family,
		.rule = rule,
	};

	return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
				   enum fib_event_type event_type,
				   struct fib_rule *rule,
				   struct fib_rules_ops *ops,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = ops->family,
		.info.extack = extack,
		.rule = rule,
	};

	ops->fib_rules_seq++;
	return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family)
{
	struct fib_rules_ops *ops;
	struct fib_rule *rule;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return -EAFNOSUPPORT;
	list_for_each_entry_rcu(rule, &ops->rules_list, list)
		call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule,
				       family);
	rules_ops_put(ops);

	return 0;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

unsigned int fib_rules_seq_read(struct net *net, int family)
{
	unsigned int fib_rules_seq;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return 0;
	fib_rules_seq = ops->fib_rules_seq;
	rules_ops_put(ops);

	return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

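/* Search ops->rules_list for an existing rule matching every attribute set
 * in @rule; used for NLM_F_EXCL checks on RTM_NEWRULE and to locate the
 * victim of an RTM_DELRULE request.
 */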
static struct fib_rule *rule_find(struct fib_rules_ops *ops,
				  struct fib_rule_hdr *frh,
				  struct nlattr **tb,
				  struct fib_rule *rule,
				  bool user_priority)
395 396 397 398
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (rule->action && r->action != rule->action)
			continue;

		if (rule->table && r->table != rule->table)
			continue;

		if (user_priority && r->pref != rule->pref)
			continue;

		if (rule->iifname[0] &&
		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (rule->oifname[0] &&
		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (rule->mark && r->mark != rule->mark)
			continue;

		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
			continue;

		if (rule->tun_id && r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
			continue;

		if (uid_range_set(&rule->uid_range) &&
		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end)))
			continue;

		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
			continue;

		if (fib_rule_port_range_set(&rule->sport_range) &&
		    !fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (fib_rule_port_range_set(&rule->dport_range) &&
		    !fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return r;
	}

	return NULL;
}

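/* Parse and validate the netlink attributes of an RTM_NEWRULE/RTM_DELRULE
 * request into a freshly allocated rule; on success the caller owns *rule
 * and must either insert it or free it.
 */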
static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack,
		       struct fib_rules_ops *ops,
		       struct nlattr *tb[],
		       struct fib_rule **rule,
		       bool *user_priority)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rule *nlrule = NULL;
	int err = -EINVAL;

	if (frh->src_len)
		if (!tb[FRA_SRC] ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			goto errout;
		}

	if (frh->dst_len)
		if (!tb[FRA_DST] ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid dst address");
			goto errout;
		}

	nlrule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (!nlrule) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&nlrule->refcnt, 1);
	nlrule->fr_net = net;

	if (tb[FRA_PRIORITY]) {
		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
		*user_priority = true;
	} else {
		nlrule->pref = fib_default_rule_pref(ops);
	}

	nlrule->proto = tb[FRA_PROTOCOL] ?
		nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		nlrule->iifindex = -1;
		nla_strlcpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, nlrule->iifname);
		if (dev)
			nlrule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		nlrule->oifindex = -1;
		nla_strlcpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, nlrule->oifname);
		if (dev)
			nlrule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (nlrule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			nlrule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	err = -EINVAL;
	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		nlrule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (nlrule->l3mdev != 1)
#endif
		{
			NL_SET_ERR_MSG(extack, "Invalid l3mdev");
			goto errout_free;
		}
	}

	nlrule->action = frh->action;
	nlrule->flags = frh->flags;
	nlrule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		nlrule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		nlrule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (nlrule->action != FR_ACT_GOTO) {
			NL_SET_ERR_MSG(extack, "Unexpected goto");
			goto errout_free;
		}

		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (nlrule->target <= nlrule->pref) {
			NL_SET_ERR_MSG(extack, "Backward goto not supported");
			goto errout_free;
		}
	} else if (nlrule->action == FR_ACT_GOTO) {
		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
		goto errout_free;
	}

	if (nlrule->l3mdev && nlrule->table) {
		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
		goto errout_free;
	}

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			NL_SET_ERR_MSG(extack, "No permission to set uid");
			goto errout_free;
		}

		nlrule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&nlrule->uid_range) ||
		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
			NL_SET_ERR_MSG(extack, "Invalid uid range");
			goto errout_free;
		}
	} else {
		nlrule->uid_range = fib_kuid_range_unset;
	}

	if (tb[FRA_IP_PROTO])
		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);

	if (tb[FRA_SPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
					 &nlrule->sport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid sport range");
			goto errout_free;
		}
	}

	if (tb[FRA_DPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
					 &nlrule->dport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid dport range");
			goto errout_free;
		}
	}

	*rule = nlrule;

	return 0;

errout_free:
	kfree(nlrule);
errout:
	return err;
}

int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule = NULL, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX + 1];
	int err = -EINVAL, unresolved = 0;
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (!ops) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
	if (err)
		goto errout;

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_find(ops, frh, tb, rule, user_priority)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb, extack);
	if (err < 0)
		goto errout_free;

	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
				      extack);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref == rule->target) {
			RCU_INIT_POINTER(rule->ctarget, r);
			break;
		}
	}

	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
		unresolved = 1;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);

int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule = NULL, *r, *nlrule = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
	if (err)
		goto errout;

	rule = rule_find(ops, frh, tb, nlrule, user_priority);
	if (!rule) {
		err = -ENOENT;
		goto errout;
	}

	if (rule->flags & FIB_RULE_PERMANENT) {
		err = -EPERM;
		goto errout;
	}

	if (ops->delete) {
		err = ops->delete(rule);
		if (err)
			goto errout;
	}

	if (rule->tun_id)
		ip_tunnel_unneed_metadata();

	list_del_rcu(&rule->list);

	if (rule->action == FR_ACT_GOTO) {
		ops->nr_goto_rules--;
		if (rtnl_dereference(rule->ctarget) == NULL)
			ops->unresolved_rules--;
	}

	/*
	 * Check if this rule is the target of any goto rules. If so,
	 * retarget them to the next rule with the same preference, or
	 * mark them unresolved. As this operation can be expensive, it
	 * is only performed if goto rules (other than the rule being
	 * deleted) have actually been added.
	 */
	if (ops->nr_goto_rules > 0) {
		struct fib_rule *n;

		n = list_next_entry(rule, list);
		if (&n->list == &ops->rules_list || n->pref != rule->pref)
			n = NULL;
		list_for_each_entry(r, &ops->rules_list, list) {
			if (rtnl_dereference(r->ctarget) != rule)
				continue;
			rcu_assign_pointer(r->ctarget, n);
			if (!n)
				ops->unresolved_rules++;
		}
	}

	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
				NULL);
	notify_rule_change(RTM_DELRULE, rule, ops, nlh,
			   NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	kfree(nlrule);
	return 0;

errout:
	kfree(nlrule);
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);

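/* Worst-case netlink message size for one rule, used to size the
 * notification skb.
 */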
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range)) /* FRA_UID_RANGE */
			 + nla_total_size(1) /* FRA_PROTOCOL */
			 + nla_total_size(1) /* FRA_IP_PROTO */
			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
			 + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
		goto nla_put_failure;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)) ||
	    (fib_rule_port_range_set(&rule->sport_range) &&
	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
	    (fib_rule_port_range_set(&rule->dport_range) &&
	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

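/* Dump the rules of one @ops into @skb, resuming at cb->args[1] and
 * updating it for the next dump round.
 */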
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

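/* Broadcast an RTM_NEWRULE/RTM_DELRULE notification for @rule to the
 * ops->nlgroup netlink group.
 */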
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

/* Bind any rule that references @dev by interface name to its ifindex. */
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

/* Mark rules bound to @dev as detached by resetting their ifindex to -1. */
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}


static int fib_rules_event(struct notifier_block *this, unsigned long event,
1065
			   void *ptr)
1066
{
1067
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1068
	struct net *net = dev_net(dev);
1069 1070
	struct fib_rules_ops *ops;

1071
	ASSERT_RTNL();
1072 1073 1074

	switch (event) {
	case NETDEV_REGISTER:
1075
		list_for_each_entry(ops, &net->rules_ops, list)
1076
			attach_rules(&ops->rules_list, dev);
1077 1078
		break;

1079 1080 1081 1082 1083 1084 1085
	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
	WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
1113
	.exit = fib_rules_net_exit,
1114 1115
};

1116 1117
static int __init fib_rules_init(void)
{
1118
	int err;
1119 1120 1121
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);
1122

E
1124 1125 1126
	if (err < 0)
		goto fail;

E
1128 1129
	if (err < 0)
		goto fail_unregister;
E
1131 1132 1133
	return 0;

fail_unregister:
E
1135 1136 1137 1138 1139
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
1140 1141 1142
}

subsys_initcall(fib_rules_init);