/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/fib_rules.h>

static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

int fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);
	err = 0;
errout:
	spin_unlock(&rules_mod_lock);

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_register);
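
/*
 * Registration sketch (illustrative only; AF_DUMMY, RTNLGRP_DUMMY_RULE and
 * all dummy_* identifiers are hypothetical): a protocol family embeds
 * struct fib_rule at the start of its own rule type, sized via ->rule_size,
 * supplies its callbacks and registers once at init time:
 *
 *	struct dummy_rule {
 *		struct fib_rule	common;
 *		u8		tos;
 *	};
 *
 *	static LIST_HEAD(dummy_rules);
 *
 *	static struct fib_rules_ops dummy_rules_ops = {
 *		.family		= AF_DUMMY,
 *		.rule_size	= sizeof(struct dummy_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= dummy_rule_action,
 *		.match		= dummy_rule_match,
 *		.configure	= dummy_rule_configure,
 *		.compare	= dummy_rule_compare,
 *		.fill		= dummy_rule_fill,
 *		.default_pref	= dummy_rule_default_pref,
 *		.nlgroup	= RTNLGRP_DUMMY_RULE,
 *		.policy		= dummy_rule_policy,
 *		.rules_list	= &dummy_rules,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = fib_rules_register(&dummy_rules_ops);
 */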

static void cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

int fib_rules_unregister(struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&rules_mod_lock);

	synchronize_rcu();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_unregister);

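/*
 * A rule matches a flow when the optional interface and fwmark constraints
 * hold and the family specific ->match() agrees; FIB_RULE_INVERT negates
 * the result.
 */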
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

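/*
 * Walk the rules in ascending preference order under RCU. The first
 * matching rule whose ->action() returns something other than -EAGAIN
 * ends the lookup; a reference to it is taken and handed back through
 * arg->rule. -ESRCH is returned if no rule produced a result.
 */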
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, ops->rules_list, list) {
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		err = ops->action(rule, fl, flags, arg);
		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_lookup);
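
/*
 * Caller side sketch (illustrative; dummy_rules_ops and dummy_fl are
 * hypothetical names): on success the caller owns the reference taken on
 * the matching rule and must release it with fib_rule_put():
 *
 *	struct fib_lookup_arg arg;
 *	int err;
 *
 *	err = fib_rules_lookup(&dummy_rules_ops, &dummy_fl, 0, &arg);
 *	if (err == 0) {
 *		... use arg.rule ...
 *		fib_rule_put(arg.rule);
 *	}
 */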

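/*
 * Sanity check the source/destination selectors: a non-zero prefix length
 * requires the corresponding FRA_SRC/FRA_DST attribute, exactly
 * ops->addr_size bytes of address and a prefix length that fits the
 * address width.
 */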
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

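/*
 * RTM_NEWRULE handler: parse and validate the request, let the family
 * ->configure() the new rule, insert it into the list ordered by
 * preference and notify listeners.
 */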
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
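
/*
 * RTM_DELRULE handler: find the first rule matching every selector given
 * in the request, unlink it, wait for concurrent readers and notify
 * listeners before dropping the reference.
 */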

int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);
		synchronize_rcu();
		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

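/*
 * Upper bound on the size of a rule notification; family specific
 * attributes are accounted for via the optional ->nlmsg_payload() hook.
 */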
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

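/*
 * Fill a single rule into an RTM_NEWRULE/RTM_DELRULE message; the generic
 * header and attributes are written here, family specific attributes via
 * ops->fill().
 */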
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->ifname[0])
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
{
	int idx = 0;
	struct fib_rule *rule;
	struct fib_rules_ops *ops;

	ops = lookup_rules_ops(family);
	if (ops == NULL)
		return -EAFNOSUPPORT;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, ops->rules_list, list) {
		if (idx < cb->args[0])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;
	rules_ops_put(ops);

	return skb->len;
}

EXPORT_SYMBOL_GPL(fib_rules_dump);
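
/*
 * Dump side sketch (illustrative; dummy_dump_rules and AF_DUMMY are
 * hypothetical): a family typically routes its RTM_GETRULE dump callback
 * straight through to fib_rules_dump():
 *
 *	static int dummy_dump_rules(struct sk_buff *skb,
 *				    struct netlink_callback *cb)
 *	{
 *		return fib_rules_dump(skb, cb, AF_DUMMY);
 *	}
 */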

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(ops->nlgroup, err);
}
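
/*
 * Device notifier helpers: attach_rules() binds rules that were configured
 * with the name of a not (yet) existing interface once that device
 * registers, detach_rules() drops the binding again when it goes away.
 */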

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}


static int fib_rules_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __init fib_rules_init(void)
{
	return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);