rtnetlink.c 105.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */

19
#include <linux/bitops.h>
L
Linus Torvalds 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
37
#include <linux/mutex.h>
38
#include <linux/if_addr.h>
39
#include <linux/if_bridge.h>
40
#include <linux/if_vlan.h>
41
#include <linux/pci.h>
42
#include <linux/etherdevice.h>
M
Martin KaFai Lau 已提交
43
#include <linux/bpf.h>
L
Linus Torvalds 已提交
44

45
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
46 47 48

#include <linux/inet.h>
#include <linux/netdevice.h>
49
#include <net/switchdev.h>
L
Linus Torvalds 已提交
50 51 52 53 54
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
55
#include <net/tcp.h>
L
Linus Torvalds 已提交
56 57
#include <net/sock.h>
#include <net/pkt_sched.h>
58
#include <net/fib_rules.h>
59
#include <net/rtnetlink.h>
60
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
61

E
Eric Dumazet 已提交
62
/* Per-(protocol, msgtype) rtnetlink message handler entry. */
struct rtnl_link {
	rtnl_doit_func		doit;	/* handles a single request message */
	rtnl_dumpit_func	dumpit;	/* handles NLM_F_DUMP requests */
	unsigned int		flags;	/* rtnl_link_flags modifying dispatch */
};

/* Big rtnetlink lock serializing all link/address/route configuration. */
static DEFINE_MUTEX(rtnl_mutex);
L
Linus Torvalds 已提交
69 70 71

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

/* skbs queued under RTNL for deferred freeing; flushed on unlock.
 * Protected by rtnl_mutex itself.
 */
static struct sk_buff *defer_kfree_skb_list;

/* Queue the skb chain [head..tail] to be freed after RTNL is dropped. */
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);

	/* Free deferred skbs outside the lock, yielding between each. */
	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

#ifdef CONFIG_PROVE_LOCKING
/* lockdep annotation helper used by rtnl_dereference() and friends. */
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

130
/* Per-family handler tables, published via RCU; writers hold RTNL. */
static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
/* Per-family reference counts pinning the tables against concurrent free. */
static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];

/* Map an RTM_* message type to an index into a handler table. */
static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

/**
 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		    unsigned int flags)
{
	struct rtnl_link *tab;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	/* Raw dereference is fine: registration is serialized by callers. */
	tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		if (tab == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	if (doit)
		tab[msgindex].doit = doit;
	if (dumpit)
		tab[msgindex].dumpit = dumpit;
	tab[msgindex].flags |= flags;

	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);

/**
 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
		      protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link *handlers;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!handlers) {
		rtnl_unlock();
		return -ENOENT;
	}

	/* Clear the slot under RTNL; the table itself stays allocated. */
	handlers[msgindex].doit = NULL;
	handlers[msgindex].dumpit = NULL;
	handlers[msgindex].flags = 0;
	rtnl_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link *handlers;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	rtnl_unlock();

	synchronize_net();

	/* Wait for all in-flight users of the table before freeing it. */
	while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
		schedule();
	kfree(handlers);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
L
Linus Torvalds 已提交
270

P
Patrick McHardy 已提交
271 272
/* All registered rtnl_link_ops; list mutations require RTNL. */
static LIST_HEAD(link_ops);

/* Look up link ops by device "kind" string; caller must hold RTNL. */
static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

P
Patrick McHardy 已提交
284 285 286 287 288 289 290 291 292 293 294 295
/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

329 330 331
/* Delete every device in @net created through @ops; caller holds RTNL. */
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	/* Batch the unregistration of all collected devices. */
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

358 359 360 361 362 363 364
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		/* Drop RTNL and sleep until a netdev unregistration wakes us. */
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with cleanup_net() */
	mutex_lock(&net_mutex);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

401 402 403 404
/* Attribute space needed for IFLA_INFO_SLAVE_DATA (plus nested payload)
 * if @dev has a master with slave-info ops; 0 otherwise.
 */
static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

/* Total attribute space needed to dump IFLA_LINKINFO for @dev. */
static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

T
Thomas Graf 已提交
450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469
/* Registered per-address-family ops; list mutations require RTNL. */
static LIST_HEAD(rtnl_af_ops);

/* Look up the per-AF ops for @family; caller must hold RTNL. */
static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del(&ops->list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

490 491
/* Attribute space needed for IFLA_AF_SPEC and all nested per-AF blobs. */
static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}

	return size;
}

510
/* True if @dev has a master device whose link ops can describe slaves. */
static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

/* Emit IFLA_INFO_SLAVE_KIND and optional IFLA_INFO_SLAVE_DATA for @dev.
 * Returns 0 (including when @dev has no master), or -EMSGSIZE / a
 * negative error from the ops callback.
 */
static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

/* Emit IFLA_INFO_KIND, optional xstats and IFLA_INFO_DATA for @dev. */
static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

/* Emit the full IFLA_LINKINFO nest (own info + slave info) for @dev. */
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

614
/* Broadcast @skb to @group; if @echo, also unicast it back to @pid. */
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		/* Extra ref: the broadcast consumes the skb, unicast needs it too. */
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

/* Unicast @skb to @pid over the namespace's rtnetlink socket. */
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

/* Notify @group listeners, honouring NLM_F_ECHO from the request @nlh. */
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

/* Report @error to every member of multicast @group. */
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
656

L
Linus Torvalds 已提交
657 658
/* Emit an RTA_METRICS nest from the RTAX_* @metrics array.
 * Returns nest length on success, 0 if no metric was set (nest cancelled),
 * -ENOBUFS / -EMSGSIZE on attribute-space exhaustion.
 */
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				/* CC algorithm is stored as a key; dump its name. */
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				/* Only expose user-visible feature bits. */
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
L
Linus Torvalds 已提交
704

705
/* Emit RTA_CACHEINFO for @dst. @expires is in jiffies relative to now;
 * negative values encode "already expired" as a negative clock_t.
 */
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id =  id,
	};

	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		/* rta_expires is 32-bit; clamp before the sign is applied. */
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
L
Linus Torvalds 已提交
726

727
/* Apply an RFC 2863 operational-state transition to @dev and notify
 * listeners if the state actually changed.
 */
static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		/* operstate is protected by dev_base_lock */
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}

754 755 756 757 758 759
/* Flags as user space should see them: PROMISC/ALLMULTI come from
 * gflags (the user-requested values), everything else from flags.
 */
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

/* Merge the flags requested in @ifm with the device's current flags,
 * honouring the ifi_change mask.
 */
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

773
/* Narrow 64-bit link stats @b into the legacy 32-bit structure @a
 * (counters are truncated on assignment).
 */
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

807
/* All VF info */
/* Attribute space needed for IFLA_VFINFO_LIST; 0 unless the device has
 * a parent and the dump requested VF info (RTEXT_FILTER_VF).
 */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

845 846
/* Attribute space needed for IFLA_VF_PORTS and IFLA_PORT_SELF. */
static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

D
David S. Miller 已提交
870
/* Attribute space needed for the IFLA_XDP nest. */
static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4);	/* XDP_PROG_ID */

	return xdp_size;
}

879 880
/* Upper bound on the netlink message size needed to dump one link.
 * noinline: keeps the large expression off callers' stack frames.
 */
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(1); /* IFLA_PROTO_DOWN */

}

923 924 925 926 927 928 929 930 931 932 933 934 935
/* Emit IFLA_VF_PORTS with one IFLA_VF_PORT nest per VF of @dev's parent. */
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			/* Non-size errors: skip this VF and keep dumping. */
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

/* Emit IFLA_PORT_SELF for @dev; driver errors other than -EMSGSIZE are
 * treated as "nothing to report".
 */
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

979 980
/* Emit IFLA_PORT_SELF and (if the parent has VFs) IFLA_VF_PORTS,
 * but only when the dump requested VF info and the driver supports it.
 */
static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

1001 1002 1003
/* Emit IFLA_PHYS_PORT_ID; -EOPNOTSUPP from the driver means "no ID". */
static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

/* Emit IFLA_PHYS_PORT_NAME; -EOPNOTSUPP from the driver means "no name". */
static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

1037 1038 1039
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
1040
	struct switchdev_attr attr = {
1041
		.orig_dev = dev,
1042
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1043 1044
		.flags = SWITCHDEV_F_NO_RECURSE,
	};
1045

1046
	err = switchdev_port_attr_get(dev, &attr);
1047 1048 1049 1050 1051 1052
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

1053 1054
	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
		    attr.u.ppid.id))
1055 1056 1057 1058 1059
		return -EMSGSIZE;

	return 0;
}

1060 1061 1062
static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
1063
	struct rtnl_link_stats64 *sp;
1064
	struct nlattr *attr;
1065

1066 1067
	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1068 1069 1070
	if (!attr)
		return -EMSGSIZE;

1071 1072
	sp = nla_data(attr);
	dev_get_stats(dev, sp);
1073

1074 1075
	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
1076 1077 1078
	if (!attr)
		return -EMSGSIZE;

1079
	copy_rtnl_link_stats(nla_data(attr), sp);
1080 1081 1082 1083 1084 1085 1086 1087 1088 1089

	return 0;
}

/* Emit one IFLA_VF_INFO nest (inside the @vfinfo IFLA_VFINFO_LIST nest)
 * describing VF @vfs_num of @dev.
 *
 * The driver is queried via ndo_get_vf_config(); if that fails the VF is
 * silently skipped (returns 0).  Running out of skb room returns -EMSGSIZE
 * after cancelling the enclosing @vfinfo nest, so the caller must not
 * touch it again on error.
 */
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	/* a failing driver query is not fatal: skip this VF */
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	/* every sub-attribute carries the same VF index */
	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	/* fan the single ifla_vf_info out into the per-attribute structs */
	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	/* new-style per-VLAN list (supports 802.1ad protocol) */
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	/* per-VF traffic counters; zeroed when the driver has no hook */
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						&vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	/* cancel the caller's list nest too: the message is unusable */
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236
/* Emit IFLA_NUM_VF and, when the driver can report per-VF config, the
 * IFLA_VFINFO_LIST nest with one IFLA_VF_INFO entry per VF.  Only done
 * when the dump asked for VF data (RTEXT_FILTER_VF) on a device with a
 * parent.  On -EMSGSIZE, rtnl_fill_vfinfo() has already cancelled the
 * list nest.
 */
static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vflist;
	int vf, nvfs;

	if (!dev->dev.parent || !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	nvfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, nvfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vflist = nla_nest_start(skb, IFLA_VFINFO_LIST);
	if (!vflist)
		return -EMSGSIZE;

	for (vf = 0; vf < nvfs; vf++)
		if (rtnl_fill_vfinfo(skb, dev, vf, vflist))
			return -EMSGSIZE;

	nla_nest_end(skb, vflist);
	return 0;
}

1237 1238
/* Emit IFLA_MAP: the device's legacy hardware resources (I/O memory
 * range, base address, IRQ, DMA channel, port).
 */
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	/* Zero the whole struct before assigning the fields: the struct
	 * may contain padding holes, and the full object is copied to
	 * user space below, so leaving them uninitialized would leak
	 * kernel stack memory.
	 */
	memset(&map, 0, sizeof(map));
	map.mem_start   = dev->mem_start;
	map.mem_end     = dev->mem_end;
	map.base_addr   = dev->base_addr;
	map.irq         = dev->irq;
	map.dma         = dev->dma;
	map.port        = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

M
Martin KaFai Lau 已提交
1255
/* Report how (if at all) an XDP program is attached to @dev and store
 * its id in *@prog_id (0 when none).  Generic (skb-mode) XDP is checked
 * first; otherwise the driver is asked through its ndo_xdp hook.
 * Must run under RTNL (rtnl_dereference of dev->xdp_prog).
 */
static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	const struct bpf_prog *generic_prog;

	ASSERT_RTNL();

	*prog_id = 0;

	generic_prog = rtnl_dereference(dev->xdp_prog);
	if (generic_prog) {
		*prog_id = generic_prog->aux->id;
		return XDP_ATTACHED_SKB;
	}

	if (!ops->ndo_xdp)
		return XDP_ATTACHED_NONE;

	return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);
}

1274 1275 1276
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
M
Martin KaFai Lau 已提交
1277
	u32 prog_id;
1278 1279 1280 1281 1282
	int err;

	xdp = nla_nest_start(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;
1283 1284

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
M
Martin KaFai Lau 已提交
1285
			 rtnl_xdp_attached_mode(dev, &prog_id));
1286 1287 1288
	if (err)
		goto err_cancel;

M
Martin KaFai Lau 已提交
1289 1290 1291 1292 1293 1294
	if (prog_id) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

1295 1296 1297 1298 1299 1300 1301 1302
	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332
/* Map a NETDEV_* notifier event to the IFLA_EVENT_* value exposed to
 * user space; events without a mapping yield IFLA_EVENT_NONE.
 */
static u32 rtnl_get_event(unsigned long event)
{
	switch (event) {
	case NETDEV_REBOOT:
		return IFLA_EVENT_REBOOT;
	case NETDEV_FEAT_CHANGE:
		return IFLA_EVENT_FEATURES;
	case NETDEV_BONDING_FAILOVER:
		return IFLA_EVENT_BONDING_FAILOVER;
	case NETDEV_NOTIFY_PEERS:
		return IFLA_EVENT_NOTIFY_PEERS;
	case NETDEV_RESEND_IGMP:
		return IFLA_EVENT_IGMP_RESEND;
	case NETDEV_CHANGEINFODATA:
		return IFLA_EVENT_BONDING_OPTIONS;
	default:
		return IFLA_EVENT_NONE;
	}
}

1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357
/* Emit IFLA_MASTER with the ifindex of @dev's master device, if any.
 * The upper device is looked up under RCU so it cannot go away while
 * we read its ifindex.
 */
static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *master;
	int err = 0;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		err = nla_put_u32(skb, IFLA_MASTER, master->ifindex);
	rcu_read_unlock();

	return err;
}

/* Emit IFLA_LINK, but only when the underlying link differs from the
 * device itself (tunnels, VLANs, ...); self-referencing devices omit it.
 */
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
{
	int iflink = dev_get_iflink(dev);

	return dev->ifindex == iflink ? 0 :
	       nla_put_u32(skb, IFLA_LINK, iflink);
}

1358 1359 1360 1361 1362 1363 1364 1365 1366 1367
/* Emit IFLA_IFALIAS when the device has an alias set; a zero/negative
 * length from dev_get_alias() means "no alias" and nothing is emitted.
 */
static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char alias[IFALIASZ];

	if (dev_get_alias(dev, alias, sizeof(alias)) <= 0)
		return 0;

	return nla_put_string(skb, IFLA_IFALIAS, alias);
}

1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384
/* Emit IFLA_LINK_NETNSID when the device's link lives in a different
 * network namespace than the device itself (e.g. veth peers, tunnels).
 * Allocates a peer-netns id on first use.
 */
static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct net *link_net;
	int id;

	if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->get_link_net)
		return 0;

	link_net = dev->rtnl_link_ops->get_link_net(dev);
	if (net_eq(dev_net(dev), link_net))
		return 0;	/* same netns: attribute not needed */

	id = peernet2id_alloc(dev_net(dev), link_net);
	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
		return -EMSGSIZE;

	return 0;
}

1385
/* Build one RTM_NEWLINK/RTM_DELLINK message for @dev into @skb.
 *
 * @type:            netlink message type (RTM_NEWLINK, ...)
 * @pid/@seq/@flags: netlink header addressing for the reply/notification
 * @change:          ifi_change mask reported to user space
 * @ext_filter_mask: RTEXT_FILTER_* bits controlling optional payload
 *                   (VF info, ports)
 * @event:           IFLA_EVENT_* value, IFLA_EVENT_NONE to omit
 * @new_nsid:        when non-NULL, emitted as IFLA_NEW_NETNSID
 *
 * Returns 0 on success or -EMSGSIZE when the skb is too small, in which
 * case the partially built message is cancelled.  Must run under RTNL.
 */
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct nlattr *af_spec;
	struct rtnl_af_ops *af_ops;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* fixed-size ifinfomsg header */
	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	/* unconditional scalar attributes */
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    nla_put_iflink(skb, dev) ||
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_changes)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	/* hardware addresses only exist for devices with addr_len != 0 */
	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	/* SR-IOV VF info and 802.1Qbg/Qbh ports honor ext_filter_mask */
	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;

	/* per-address-family nests (one per registered rtnl_af_ops) */
	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
		goto nla_put_failure;

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->fill_link_af) {
			struct nlattr *af;
			int err;

			if (!(af = nla_nest_start(skb, af_ops->family)))
				goto nla_put_failure;

			err = af_ops->fill_link_af(skb, dev, ext_filter_mask);

			/*
			 * Caller may return ENODATA to indicate that there
			 * was no data to be dumped. This is not an error, it
			 * means we should trim the attribute header and
			 * continue.
			 */
			if (err == -ENODATA)
				nla_nest_cancel(skb, af);
			else if (err < 0)
				goto nla_put_failure;

			nla_nest_end(skb, af);
		}
	}

	nla_nest_end(skb, af_spec);

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

J
Jiri Pirko 已提交
1518
/* Validation policy for top-level IFLA_* attributes of RTM_*LINK
 * requests.  Attribute types absent from this table are accepted
 * without type or length validation by nla_parse().
 */
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	[IFLA_IFALIAS]	        = { .type = NLA_STRING, .len = IFALIASZ-1 },
	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
};

P
Patrick McHardy 已提交
1553 1554 1555
/* Policy for the IFLA_LINKINFO nest: link kind name plus kind-specific
 * (and slave-specific) data blobs validated later by the link ops.
 */
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

1560
/* Policy for per-VF attributes found inside IFLA_VFINFO_LIST /
 * IFLA_VF_INFO nests (see do_setvfinfo()).
 */
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};

1575 1576 1577 1578 1579 1580 1581 1582 1583 1584
/* Policy for 802.1Qbg/Qbh port-profile attributes (IFLA_PORT_SELF /
 * IFLA_VF_PORTS nests).
 */
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};

1594 1595 1596
/* Policy for the IFLA_XDP nest: program fd and flags on set;
 * attach mode and program id on dump.
 */
static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};

1601 1602 1603 1604 1605
static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

1606 1607
	if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
			     ifla_info_policy, NULL) < 0)
1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}

/* True when a master-ifindex dump filter is active and @dev's master
 * does not match it (devices without a master are filtered out too).
 */
static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;	/* no filter requested */

	master = netdev_master_upper_dev_get(dev);
	return !master || master->ifindex != master_idx;
}

/* True when a link-kind dump filter is active and @dev is of a
 * different kind.
 */
static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	return kind_ops && dev->rtnl_link_ops != kind_ops;
}

/* True when @dev should be excluded from a filtered RTM_GETLINK dump. */
static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	return link_master_filtered(dev, master_idx) ||
	       link_kind_filtered(dev, kind_ops);
}

J
Jiri Pirko 已提交
1654 1655 1656 1657 1658 1659 1660 1661 1662
/* Dump callback for RTM_GETLINK: walk the netns device-index hash and
 * emit one RTM_NEWLINK message per (optionally filtered) device.
 *
 * Resume state across dump invocations lives in cb->args[0] (hash
 * bucket) and cb->args[1] (index within bucket).  Returns the number of
 * bytes written (skb->len) or a negative error when the very first
 * device of an invocation does not fit.
 */
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int err;
	int hdrlen;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	/* parse optional filters; a parse failure just means "no filters" */
	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
			ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				goto cont;
			if (idx < s_idx)
				goto cont;	/* already dumped last round */
			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       flags,
					       ext_filter_mask, 0, NULL);

			if (err < 0) {
				/* skb full: stop here and resume later,
				 * unless even the first entry failed
				 */
				if (likely(skb->len))
					goto out;

				goto out_err;
			}
cont:
			idx++;
		}
	}
out:
	err = skb->len;
out_err:
	/* record resume position and dump consistency sequence */
	cb->args[1] = idx;
	cb->args[0] = h;
	cb->seq = net->dev_base_seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	return err;
}

1732 1733
/* Parse a flat buffer of IFLA_* attributes into @tb using the shared
 * rtnetlink link policy.  Exported so other subsystems validate IFLA
 * attributes exactly as rtnetlink itself does.
 */
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
{
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);

1739 1740 1741 1742 1743 1744 1745 1746
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1747 1748
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1749 1750 1751 1752 1753 1754
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);

1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780
	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				return -EAFNOSUPPORT;

			if (!af_ops->set_link_af)
				return -EOPNOTSUPP;

			if (af_ops->validate_link_af) {
1781
				err = af_ops->validate_link_af(dev, af);
1782 1783 1784 1785 1786 1787
				if (err < 0)
					return err;
			}
		}
	}

1788 1789 1790
	return 0;
}

1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806
/* Forward a VF GUID update to the driver.  The caller has already
 * verified that ndo_set_vf_guid is implemented.
 */
static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

/* Apply a VF node/port GUID; only meaningful on InfiniBand links. */
static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	return dev->type == ARPHRD_INFINIBAND ?
	       handle_infiniband_guid(dev, ivt, guid_type) : -EOPNOTSUPP;
}

1807
/* Apply one IFLA_VF_INFO nest's attributes (@tb, parsed with
 * ifla_vf_policy) to the corresponding VF of @dev.
 *
 * Each attribute maps to one driver ndo_set_vf_* callback; a missing
 * callback yields -EOPNOTSUPP.  Processing stops at the first failing
 * setting.  Returns -EINVAL when the nest carried no known attribute.
 */
static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		/* legacy single-VLAN attribute always means 802.1Q */
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		/* collect and validate the nested IFLA_VF_VLAN_INFO entries */
		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		/* only a single VLAN entry is supported so far */
		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		/* legacy attribute sets only the max rate: read back the
		 * current min rate so it is preserved
		 */
		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivf.min_tx_rate,
						   ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivt->min_tx_rate,
						   ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}

D
David Ahern 已提交
1960 1961
static int do_set_master(struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
1962
{
1963
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1964 1965 1966
	const struct net_device_ops *ops;
	int err;

1967 1968
	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
1969
			return 0;
1970
		ops = upper_dev->netdev_ops;
1971
		if (ops->ndo_del_slave) {
1972
			err = ops->ndo_del_slave(upper_dev, dev);
1973 1974 1975 1976 1977 1978 1979 1980
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
1981 1982
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
1983
			return -EINVAL;
1984
		ops = upper_dev->netdev_ops;
1985
		if (ops->ndo_add_slave) {
D
David Ahern 已提交
1986
			err = ops->ndo_add_slave(upper_dev, dev, extack);
1987 1988 1989 1990 1991 1992 1993 1994 1995
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

1996
#define DO_SETLINK_MODIFIED	0x01
1997 1998
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03
1999 2000
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
2001
		      struct netlink_ext_ack *extack,
2002
		      struct nlattr **tb, char *ifname, int status)
L
Linus Torvalds 已提交
2003
{
2004
	const struct net_device_ops *ops = dev->netdev_ops;
2005
	int err;
L
Linus Torvalds 已提交
2006

2007
	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
2008
		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
2009 2010 2011 2012
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}
2013
		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
2014
			put_net(net);
2015 2016 2017
			err = -EPERM;
			goto errout;
		}
2018 2019 2020 2021
		err = dev_change_net_namespace(dev, net, ifname);
		put_net(net);
		if (err)
			goto errout;
2022
		status |= DO_SETLINK_MODIFIED;
2023 2024
	}

2025
	if (tb[IFLA_MAP]) {
L
Linus Torvalds 已提交
2026 2027 2028
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

2029
		if (!ops->ndo_set_config) {
L
Linus Torvalds 已提交
2030
			err = -EOPNOTSUPP;
2031
			goto errout;
L
Linus Torvalds 已提交
2032 2033 2034 2035
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
2036
			goto errout;
L
Linus Torvalds 已提交
2037 2038
		}

2039
		u_map = nla_data(tb[IFLA_MAP]);
L
Linus Torvalds 已提交
2040 2041 2042 2043 2044 2045 2046
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

2047
		err = ops->ndo_set_config(dev, &k_map);
2048
		if (err < 0)
2049
			goto errout;
L
Linus Torvalds 已提交
2050

2051
		status |= DO_SETLINK_NOTIFY;
L
Linus Torvalds 已提交
2052 2053
	}

2054
	if (tb[IFLA_ADDRESS]) {
2055 2056 2057
		struct sockaddr *sa;
		int len;

2058 2059
		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
						  sizeof(*sa));
2060 2061 2062
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
2063
			goto errout;
2064 2065
		}
		sa->sa_family = dev->type;
2066
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2067
		       dev->addr_len);
2068
		err = dev_set_mac_address(dev, sa);
2069
		kfree(sa);
L
Linus Torvalds 已提交
2070
		if (err)
2071
			goto errout;
2072
		status |= DO_SETLINK_MODIFIED;
L
Linus Torvalds 已提交
2073 2074
	}

2075 2076 2077
	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
2078
			goto errout;
2079
		status |= DO_SETLINK_MODIFIED;
L
Linus Torvalds 已提交
2080 2081
	}

2082 2083
	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2084
		status |= DO_SETLINK_NOTIFY;
2085 2086
	}

2087 2088 2089 2090 2091
	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
	 */
2092
	if (ifm->ifi_index > 0 && ifname[0]) {
2093 2094
		err = dev_change_name(dev, ifname);
		if (err < 0)
2095
			goto errout;
2096
		status |= DO_SETLINK_MODIFIED;
L
Linus Torvalds 已提交
2097 2098
	}

2099 2100 2101 2102 2103
	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
2104
		status |= DO_SETLINK_NOTIFY;
2105 2106
	}

2107 2108
	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2109
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
L
Linus Torvalds 已提交
2110 2111
	}

2112
	if (ifm->ifi_flags || ifm->ifi_change) {
2113
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
2114 2115
		if (err < 0)
			goto errout;
2116
	}
L
Linus Torvalds 已提交
2117

2118
	if (tb[IFLA_MASTER]) {
D
David Ahern 已提交
2119
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2120 2121
		if (err)
			goto errout;
2122
		status |= DO_SETLINK_MODIFIED;
2123 2124
	}

2125 2126 2127 2128
	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
2129
		status |= DO_SETLINK_MODIFIED;
2130 2131
	}

2132
	if (tb[IFLA_TXQLEN]) {
2133 2134
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
		unsigned int orig_len = dev->tx_queue_len;
2135 2136 2137 2138 2139 2140 2141 2142 2143 2144

		if (dev->tx_queue_len ^ value) {
			dev->tx_queue_len = value;
			err = call_netdevice_notifiers(
			      NETDEV_CHANGE_TX_QUEUE_LEN, dev);
			err = notifier_to_errno(err);
			if (err) {
				dev->tx_queue_len = orig_len;
				goto errout;
			}
2145
			status |= DO_SETLINK_NOTIFY;
2146
		}
2147
	}
S
Stefan Rompf 已提交
2148

2149
	if (tb[IFLA_OPERSTATE])
2150
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
S
Stefan Rompf 已提交
2151

2152
	if (tb[IFLA_LINKMODE]) {
2153 2154
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

2155
		write_lock_bh(&dev_base_lock);
2156
		if (dev->link_mode ^ value)
2157
			status |= DO_SETLINK_NOTIFY;
2158
		dev->link_mode = value;
2159
		write_unlock_bh(&dev_base_lock);
S
Stefan Rompf 已提交
2160 2161
	}

2162
	if (tb[IFLA_VFINFO_LIST]) {
2163
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2164 2165
		struct nlattr *attr;
		int rem;
2166

2167
		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2168 2169
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
2170
				err = -EINVAL;
2171
				goto errout;
2172
			}
2173
			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
2174
					       ifla_vf_policy, NULL);
2175 2176 2177
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
2178 2179
			if (err < 0)
				goto errout;
2180
			status |= DO_SETLINK_NOTIFY;
2181
		}
2182
	}
L
Linus Torvalds 已提交
2183 2184
	err = 0;

2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195
	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2196 2197 2198 2199 2200 2201
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
2202
					       ifla_port_policy, NULL);
2203 2204 2205 2206 2207 2208 2209 2210 2211 2212
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
2213
			status |= DO_SETLINK_NOTIFY;
2214 2215 2216 2217 2218 2219 2220 2221
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested(port, IFLA_PORT_MAX,
2222 2223
				       tb[IFLA_PORT_SELF], ifla_port_policy,
				       NULL);
2224 2225 2226 2227 2228 2229 2230 2231
		if (err < 0)
			goto errout;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
2232
		status |= DO_SETLINK_NOTIFY;
2233
	}
T
Thomas Graf 已提交
2234 2235 2236 2237 2238 2239 2240 2241 2242

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
2243
				BUG();
T
Thomas Graf 已提交
2244

2245
			err = af_ops->set_link_af(dev, af);
T
Thomas Graf 已提交
2246 2247 2248
			if (err < 0)
				goto errout;

2249
			status |= DO_SETLINK_NOTIFY;
T
Thomas Graf 已提交
2250 2251
		}
	}
2252 2253
	err = 0;

2254 2255 2256 2257 2258 2259 2260 2261
	if (tb[IFLA_PROTO_DOWN]) {
		err = dev_change_proto_down(dev,
					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

2262 2263
	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];
2264
		u32 xdp_flags = 0;
2265 2266

		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
2267
				       ifla_xdp_policy, NULL);
2268 2269 2270
		if (err < 0)
			goto errout;

M
Martin KaFai Lau 已提交
2271
		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2272 2273 2274
			err = -EINVAL;
			goto errout;
		}
2275 2276 2277 2278 2279 2280 2281

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
				err = -EINVAL;
				goto errout;
			}
2282
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2283 2284 2285
				err = -EINVAL;
				goto errout;
			}
2286 2287
		}

2288
		if (xdp[IFLA_XDP_FD]) {
2289
			err = dev_change_xdp_fd(dev, extack,
2290 2291
						nla_get_s32(xdp[IFLA_XDP_FD]),
						xdp_flags);
2292 2293 2294 2295 2296 2297
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

2298
errout:
2299 2300 2301 2302 2303 2304 2305 2306
	if (status & DO_SETLINK_MODIFIED) {
		if (status & DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}
2307

2308 2309
	return err;
}
L
Linus Torvalds 已提交
2310

2311 2312
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
2313
{
2314
	struct net *net = sock_net(skb->sk);
2315 2316 2317 2318 2319 2320
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

2321 2322
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
			  extack);
2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
2334
		dev = __dev_get_by_index(net, ifm->ifi_index);
2335
	else if (tb[IFLA_IFNAME])
2336
		dev = __dev_get_by_name(net, ifname);
2337 2338 2339 2340 2341 2342 2343 2344
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

E
Eric Dumazet 已提交
2345 2346
	err = validate_linkmsg(dev, tb);
	if (err < 0)
2347
		goto errout;
2348

2349
	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
2350
errout:
L
Linus Torvalds 已提交
2351 2352 2353
	return err;
}

2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}

2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405
int rtnl_delete_link(struct net_device *dev)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);

2406 2407
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
P
Patrick McHardy 已提交
2408
{
2409
	struct net *net = sock_net(skb->sk);
P
Patrick McHardy 已提交
2410 2411 2412 2413 2414 2415
	struct net_device *dev;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	int err;

2416
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
P
Patrick McHardy 已提交
2417 2418 2419 2420 2421 2422 2423 2424
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
2425
		dev = __dev_get_by_index(net, ifm->ifi_index);
P
Patrick McHardy 已提交
2426
	else if (tb[IFLA_IFNAME])
2427
		dev = __dev_get_by_name(net, ifname);
2428 2429
	else if (tb[IFLA_GROUP])
		return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
P
Patrick McHardy 已提交
2430 2431 2432 2433 2434 2435
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

2436
	return rtnl_delete_link(dev);
P
Patrick McHardy 已提交
2437 2438
}

2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			return err;
	}

	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;

2453
	__dev_notify_flags(dev, old_flags, ~0U);
2454 2455 2456 2457
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);

2458
struct net_device *rtnl_create_link(struct net *net,
2459
	const char *ifname, unsigned char name_assign_type,
2460
	const struct rtnl_link_ops *ops, struct nlattr *tb[])
2461 2462
{
	struct net_device *dev;
2463 2464
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;
2465

2466 2467 2468
	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
2469
		num_tx_queues = ops->get_num_tx_queues();
2470 2471 2472 2473

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
2474
		num_rx_queues = ops->get_num_rx_queues();
2475

2476
	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2477
			       ops->setup, num_tx_queues, num_rx_queues);
2478
	if (!dev)
2479
		return ERR_PTR(-ENOMEM);
2480

2481 2482
	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
2483
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
2484

2485 2486
	if (tb[IFLA_MTU])
		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
2487
	if (tb[IFLA_ADDRESS]) {
2488 2489
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
				nla_len(tb[IFLA_ADDRESS]));
2490 2491
		dev->addr_assign_type = NET_ADDR_SET;
	}
2492 2493 2494 2495 2496 2497
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
				nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
2498
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2499 2500
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
2501 2502
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2503 2504 2505

	return dev;
}
E
Eric Dumazet 已提交
2506
EXPORT_SYMBOL(rtnl_create_link);
2507

2508 2509
static int rtnl_group_changelink(const struct sk_buff *skb,
		struct net *net, int group,
2510
		struct ifinfomsg *ifm,
2511
		struct netlink_ext_ack *extack,
2512 2513
		struct nlattr **tb)
{
2514
	struct net_device *dev, *aux;
2515 2516
	int err;

2517
	for_each_netdev_safe(net, dev, aux) {
2518
		if (dev->group == group) {
2519
			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
2520 2521 2522 2523 2524 2525 2526 2527
			if (err < 0)
				return err;
		}
	}

	return 0;
}

2528 2529
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
P
Patrick McHardy 已提交
2530
{
2531
	struct net *net = sock_net(skb->sk);
P
Patrick McHardy 已提交
2532
	const struct rtnl_link_ops *ops;
2533
	const struct rtnl_link_ops *m_ops = NULL;
P
Patrick McHardy 已提交
2534
	struct net_device *dev;
2535
	struct net_device *master_dev = NULL;
P
Patrick McHardy 已提交
2536 2537 2538 2539 2540
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
2541
	unsigned char name_assign_type = NET_NAME_USER;
P
Patrick McHardy 已提交
2542 2543
	int err;

2544
#ifdef CONFIG_MODULES
P
Patrick McHardy 已提交
2545
replay:
2546
#endif
2547
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
P
Patrick McHardy 已提交
2548 2549 2550 2551 2552 2553 2554 2555 2556 2557
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
2558
		dev = __dev_get_by_index(net, ifm->ifi_index);
2559 2560 2561 2562 2563 2564
	else {
		if (ifname[0])
			dev = __dev_get_by_name(net, ifname);
		else
			dev = NULL;
	}
P
Patrick McHardy 已提交
2565

2566 2567 2568 2569 2570 2571
	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

E
Eric Dumazet 已提交
2572 2573
	err = validate_linkmsg(dev, tb);
	if (err < 0)
2574 2575
		return err;

P
Patrick McHardy 已提交
2576 2577
	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2578 2579
				       tb[IFLA_LINKINFO], ifla_info_policy,
				       NULL);
P
Patrick McHardy 已提交
2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	if (1) {
S
Sasha Levin 已提交
2594 2595
		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2596 2597
		struct nlattr **data = NULL;
		struct nlattr **slave_data = NULL;
2598
		struct net *dest_net, *link_net = NULL;
P
Patrick McHardy 已提交
2599 2600 2601 2602 2603

		if (ops) {
			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
				err = nla_parse_nested(attr, ops->maxtype,
						       linkinfo[IFLA_INFO_DATA],
2604
						       ops->policy, NULL);
P
Patrick McHardy 已提交
2605 2606 2607 2608 2609
				if (err < 0)
					return err;
				data = attr;
			}
			if (ops->validate) {
2610
				err = ops->validate(tb, data, extack);
P
Patrick McHardy 已提交
2611 2612 2613 2614 2615
				if (err < 0)
					return err;
			}
		}

2616 2617 2618 2619 2620 2621
		if (m_ops) {
			if (m_ops->slave_maxtype &&
			    linkinfo[IFLA_INFO_SLAVE_DATA]) {
				err = nla_parse_nested(slave_attr,
						       m_ops->slave_maxtype,
						       linkinfo[IFLA_INFO_SLAVE_DATA],
2622 2623
						       m_ops->slave_policy,
						       NULL);
2624 2625 2626 2627 2628 2629
				if (err < 0)
					return err;
				slave_data = slave_attr;
			}
		}

P
Patrick McHardy 已提交
2630
		if (dev) {
2631
			int status = 0;
P
Patrick McHardy 已提交
2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642

			if (nlh->nlmsg_flags & NLM_F_EXCL)
				return -EEXIST;
			if (nlh->nlmsg_flags & NLM_F_REPLACE)
				return -EOPNOTSUPP;

			if (linkinfo[IFLA_INFO_DATA]) {
				if (!ops || ops != dev->rtnl_link_ops ||
				    !ops->changelink)
					return -EOPNOTSUPP;

2643
				err = ops->changelink(dev, tb, data, extack);
P
Patrick McHardy 已提交
2644 2645
				if (err < 0)
					return err;
2646
				status |= DO_SETLINK_NOTIFY;
P
Patrick McHardy 已提交
2647 2648
			}

2649 2650 2651 2652 2653
			if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
				if (!m_ops || !m_ops->slave_changelink)
					return -EOPNOTSUPP;

				err = m_ops->slave_changelink(master_dev, dev,
2654 2655
							      tb, slave_data,
							      extack);
2656 2657
				if (err < 0)
					return err;
2658
				status |= DO_SETLINK_NOTIFY;
2659 2660
			}

2661 2662
			return do_setlink(skb, dev, ifm, extack, tb, ifname,
					  status);
P
Patrick McHardy 已提交
2663 2664
		}

2665 2666
		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
2667
				return rtnl_group_changelink(skb, net,
2668
						nla_get_u32(tb[IFLA_GROUP]),
2669
						ifm, extack, tb);
P
Patrick McHardy 已提交
2670
			return -ENODEV;
2671
		}
P
Patrick McHardy 已提交
2672

2673
		if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
P
Patrick McHardy 已提交
2674 2675 2676
			return -EOPNOTSUPP;

		if (!ops) {
2677
#ifdef CONFIG_MODULES
P
Patrick McHardy 已提交
2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689
			if (kind[0]) {
				__rtnl_unlock();
				request_module("rtnl-link-%s", kind);
				rtnl_lock();
				ops = rtnl_link_ops_get(kind);
				if (ops)
					goto replay;
			}
#endif
			return -EOPNOTSUPP;
		}

2690 2691 2692
		if (!ops->setup)
			return -EOPNOTSUPP;

2693
		if (!ifname[0]) {
P
Patrick McHardy 已提交
2694
			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
2695 2696
			name_assign_type = NET_NAME_ENUM;
		}
2697

2698
		dest_net = rtnl_link_get_net(net, tb);
2699 2700 2701
		if (IS_ERR(dest_net))
			return PTR_ERR(dest_net);

2702 2703 2704 2705
		err = -EPERM;
		if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
			goto out;

2706 2707 2708 2709 2710 2711 2712 2713
		if (tb[IFLA_LINK_NETNSID]) {
			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

			link_net = get_net_ns_by_id(dest_net, id);
			if (!link_net) {
				err =  -EINVAL;
				goto out;
			}
2714 2715 2716
			err = -EPERM;
			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
				goto out;
2717 2718 2719 2720
		}

		dev = rtnl_create_link(link_net ? : dest_net, ifname,
				       name_assign_type, ops, tb);
2721
		if (IS_ERR(dev)) {
2722
			err = PTR_ERR(dev);
2723 2724 2725 2726 2727
			goto out;
		}

		dev->ifindex = ifm->ifi_index;

2728
		if (ops->newlink) {
2729 2730
			err = ops->newlink(link_net ? : net, dev, tb, data,
					   extack);
2731
			/* Drivers should call free_netdev() in ->destructor
2732 2733
			 * and unregister it on failure after registration
			 * so that device could be finally freed in rtnl_unlock.
2734
			 */
2735 2736 2737 2738
			if (err < 0) {
				/* If device is not registered at all, free it now */
				if (dev->reg_state == NETREG_UNINITIALIZED)
					free_netdev(dev);
2739
				goto out;
2740
			}
2741
		} else {
2742
			err = register_netdevice(dev);
2743 2744 2745 2746
			if (err < 0) {
				free_netdev(dev);
				goto out;
			}
2747
		}
2748
		err = rtnl_configure_link(dev, ifm);
2749 2750
		if (err < 0)
			goto out_unregister;
2751
		if (link_net) {
2752
			err = dev_change_net_namespace(dev, dest_net, ifname);
2753
			if (err < 0)
2754
				goto out_unregister;
2755
		}
2756
		if (tb[IFLA_MASTER]) {
D
David Ahern 已提交
2757 2758
			err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
					    extack);
2759 2760 2761
			if (err)
				goto out_unregister;
		}
2762
out:
2763 2764
		if (link_net)
			put_net(link_net);
2765
		put_net(dest_net);
P
Patrick McHardy 已提交
2766
		return err;
2767 2768 2769 2770 2771 2772 2773 2774 2775 2776
out_unregister:
		if (ops->newlink) {
			LIST_HEAD(list_kill);

			ops->dellink(dev, &list_kill);
			unregister_netdevice_many(&list_kill);
		} else {
			unregister_netdevice(dev);
		}
		goto out;
P
Patrick McHardy 已提交
2777 2778 2779
	}
}

2780 2781
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
2782
{
2783
	struct net *net = sock_net(skb->sk);
2784
	struct ifinfomsg *ifm;
2785
	char ifname[IFNAMSIZ];
2786 2787 2788
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
2789
	int err;
2790
	u32 ext_filter_mask = 0;
2791

2792
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2793
	if (err < 0)
2794
		return err;
2795

2796 2797 2798
	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

2799 2800 2801
	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

2802
	ifm = nlmsg_data(nlh);
2803 2804 2805 2806 2807
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
2808 2809
		return -EINVAL;

2810 2811 2812
	if (dev == NULL)
		return -ENODEV;

2813
	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
2814 2815
	if (nskb == NULL)
		return -ENOBUFS;
2816

2817
	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
2818
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0, NULL);
2819 2820 2821 2822
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
2823
	} else
2824
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
2825

2826
	return err;
2827 2828
}

2829
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2830
{
2831 2832 2833 2834 2835
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	u16 min_ifinfo_dump_size = 0;
2836 2837 2838 2839 2840
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2841

2842
	if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
2843 2844 2845
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}
2846 2847 2848 2849 2850 2851 2852

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
2853 2854
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
2855 2856 2857 2858
		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
					     if_nlmsg_size(dev,
						           ext_filter_mask));
	}
2859
	rcu_read_unlock();
2860

2861
	return nlmsg_total_size(min_ifinfo_dump_size);
2862 2863
}

A
Adrian Bunk 已提交
2864
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
L
Linus Torvalds 已提交
2865 2866 2867 2868 2869 2870
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;
2871

2872
	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
L
Linus Torvalds 已提交
2873
		int type = cb->nlh->nlmsg_type-RTM_BASE;
2874 2875 2876
		struct rtnl_link *handlers;
		rtnl_dumpit_func dumpit;

L
Linus Torvalds 已提交
2877 2878
		if (idx < s_idx || idx == PF_PACKET)
			continue;
2879 2880 2881

		handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
		if (!handlers)
L
Linus Torvalds 已提交
2882
			continue;
2883 2884 2885 2886 2887

		dumpit = READ_ONCE(handlers[type].dumpit);
		if (!dumpit)
			continue;

2888
		if (idx > s_idx) {
L
Linus Torvalds 已提交
2889
			memset(&cb->args[0], 0, sizeof(cb->args));
2890 2891 2892
			cb->prev_seq = 0;
			cb->seq = 0;
		}
2893
		if (dumpit(skb, cb))
L
Linus Torvalds 已提交
2894 2895 2896 2897 2898 2899 2900
			break;
	}
	cb->family = idx;

	return skb->len;
}

2901
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
2902
				       unsigned int change,
2903
				       u32 event, gfp_t flags, int *new_nsid)
L
Linus Torvalds 已提交
2904
{
2905
	struct net *net = dev_net(dev);
L
Linus Torvalds 已提交
2906
	struct sk_buff *skb;
2907
	int err = -ENOBUFS;
2908
	size_t if_info_size;
L
Linus Torvalds 已提交
2909

2910
	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
2911 2912
	if (skb == NULL)
		goto errout;
L
Linus Torvalds 已提交
2913

2914 2915
	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event,
			       new_nsid);
2916 2917 2918 2919 2920 2921
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
2922
	return skb;
2923 2924
errout:
	if (err < 0)
2925
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2926 2927 2928 2929 2930 2931 2932 2933 2934 2935
	return NULL;
}

/* rtmsg_ifinfo_send - multicast a prebuilt link-notification skb to the
 * RTNLGRP_LINK group of @dev's netns.
 */
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
}

2936 2937
static void rtmsg_ifinfo_event(int type, struct net_device *dev,
			       unsigned int change, u32 event,
2938
			       gfp_t flags, int *new_nsid)
2939 2940 2941
{
	struct sk_buff *skb;

2942 2943 2944
	if (dev->reg_state != NETREG_REGISTERED)
		return;

2945
	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid);
2946 2947
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags);
L
Linus Torvalds 已提交
2948
}
2949 2950 2951 2952

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags)
{
2953
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags, NULL);
2954
}
2955
EXPORT_SYMBOL(rtmsg_ifinfo);
L
Linus Torvalds 已提交
2956

2957 2958 2959 2960 2961 2962 2963
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
			 gfp_t flags, int *new_nsid)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   new_nsid);
}

2964 2965
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
2966
				   u8 *addr, u16 vid, u32 pid, u32 seq,
2967
				   int type, unsigned int flags,
2968
				   int nlflags, u16 ndm_state)
2969 2970 2971 2972
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

2973
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2974 2975 2976 2977 2978 2979 2980 2981 2982 2983
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1	 = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = flags;
	ndm->ndm_type	 = 0;
	ndm->ndm_ifindex = dev->ifindex;
2984
	ndm->ndm_state   = ndm_state;
2985 2986 2987

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;
2988 2989 2990
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;
2991

2992 2993
	nlmsg_end(skb, nlh);
	return 0;
2994 2995 2996 2997 2998 2999

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

3000 3001
static inline size_t rtnl_fdb_nlmsg_size(void)
{
3002 3003 3004 3005
	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
	       0;
3006 3007
}

3008 3009
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
3010 3011 3012 3013 3014 3015 3016 3017 3018
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

3019
	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
3020
				      0, 0, type, NTF_SELF, 0, ndm_state);
3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

3032 3033 3034 3035 3036 3037
/**
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
3038
		     const unsigned char *addr, u16 vid,
3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

3051 3052 3053 3054 3055
	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return err;
	}

3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);

3069 3070
static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
			 struct netlink_ext_ack *extack)
3071 3072 3073 3074 3075
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
3076
			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
3077 3078 3079 3080 3081 3082
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);

		if (!vid || vid >= VLAN_VID_MASK) {
3083
			NL_SET_ERR_MSG(extack, "invalid vlan id");
3084 3085 3086 3087 3088 3089 3090
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}

3091 3092
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
3093 3094 3095 3096 3097 3098
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	u8 *addr;
3099
	u16 vid;
3100 3101
	int err;

3102
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3103 3104 3105 3106 3107
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
3108
		NL_SET_ERR_MSG(extack, "invalid ifindex");
3109 3110 3111 3112 3113
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
3114
		NL_SET_ERR_MSG(extack, "unknown ifindex");
3115 3116 3117 3118
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3119
		NL_SET_ERR_MSG(extack, "invalid address");
3120 3121 3122 3123 3124
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

3125
	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3126 3127 3128
	if (err)
		return err;

3129 3130 3131 3132 3133
	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
3134 3135 3136
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

3137 3138
		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
				       nlh->nlmsg_flags);
3139 3140 3141 3142 3143 3144 3145
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
3146 3147 3148
	if ((ndm->ndm_flags & NTF_SELF)) {
		if (dev->netdev_ops->ndo_fdb_add)
			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3149
							   vid,
3150 3151
							   nlh->nlmsg_flags);
		else
3152
			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3153
					       nlh->nlmsg_flags);
3154

3155
		if (!err) {
3156 3157
			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
					ndm->ndm_state);
3158
			ndm->ndm_flags &= ~NTF_SELF;
3159
		}
3160 3161 3162 3163 3164
	}
out:
	return err;
}

3165 3166 3167 3168 3169 3170
/**
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 * @ndm:  neighbour discovery message header (state from userspace)
 * @tb:   parsed NDA_* netlink attributes (unused here)
 * @dev:  device to remove the address from
 * @addr: MAC address to delete
 * @vid:  VLAN id (unused by the default op)
 *
 * Removes @addr from the device's unicast or multicast filter list.
 * Only static (NUD_PERMANENT) entries are supported.
 *
 * Returns 0 on success or a negative errno.
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);

3192 3193
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
3194 3195 3196
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
3197
	struct nlattr *tb[NDA_MAX+1];
3198 3199 3200
	struct net_device *dev;
	int err = -EINVAL;
	__u8 *addr;
3201
	u16 vid;
3202

3203
	if (!netlink_capable(skb, CAP_NET_ADMIN))
3204 3205
		return -EPERM;

3206
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3207 3208
	if (err < 0)
		return err;
3209 3210 3211

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
3212
		NL_SET_ERR_MSG(extack, "invalid ifindex");
3213 3214 3215 3216 3217
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
3218
		NL_SET_ERR_MSG(extack, "unknown ifindex");
3219 3220 3221
		return -ENODEV;
	}

3222
	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3223
		NL_SET_ERR_MSG(extack, "invalid address");
3224 3225 3226 3227
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);
3228

3229
	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
3230 3231 3232
	if (err)
		return err;

3233 3234 3235 3236 3237
	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
3238 3239
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;
3240

3241
		if (ops->ndo_fdb_del)
3242
			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3243 3244 3245 3246 3247 3248 3249 3250

		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
3251 3252
	if (ndm->ndm_flags & NTF_SELF) {
		if (dev->netdev_ops->ndo_fdb_del)
3253 3254
			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
							   vid);
3255
		else
3256
			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3257

3258
		if (!err) {
3259 3260
			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
					ndm->ndm_state);
3261
			ndm->ndm_flags &= ~NTF_SELF;
3262
		}
3263 3264 3265 3266 3267
	}
out:
	return err;
}

3268 3269 3270 3271 3272 3273 3274 3275
static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
3276
	u32 portid, seq;
3277

3278
	portid = NETLINK_CB(cb->skb).portid;
3279 3280 3281
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
3282
		if (*idx < cb->args[2])
3283 3284
			goto skip;

3285
		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3286
					      portid, seq,
3287
					      RTM_NEWNEIGH, NTF_SELF,
3288
					      NLM_F_MULTI, NUD_PERMANENT);
3289 3290 3291 3292 3293 3294 3295 3296 3297
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}

/**
3298
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3299 3300 3301 3302
 * @nlh: netlink message header
 * @dev: netdevice
 *
 * Default netdevice operation to dump the existing unicast address list.
3303
 * Returns number of addresses from list put in skb.
3304 3305 3306 3307
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
3308
		      struct net_device *filter_dev,
3309
		      int *idx)
3310 3311 3312 3313
{
	int err;

	netif_addr_lock_bh(dev);
3314
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3315 3316
	if (err)
		goto out;
3317
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3318 3319
out:
	netif_addr_unlock_bh(dev);
3320
	return err;
3321 3322 3323
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);

3324 3325 3326
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
3327 3328 3329 3330 3331 3332
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
3333
	struct hlist_head *head;
3334 3335
	int brport_idx = 0;
	int br_idx = 0;
3336 3337 3338 3339
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;
3340

3341 3342 3343 3344 3345
	err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
			  IFLA_MAX, ifla_policy, NULL);
	if (err < 0) {
		return -EINVAL;
	} else if (err == 0) {
3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359
		if (tb[IFLA_MASTER])
			br_idx = nla_get_u32(tb[IFLA_MASTER]);
	}

	brport_idx = ifm->ifi_index;

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

3360 3361
	s_h = cb->args[0];
	s_idx = cb->args[1];
3362

3363 3364 3365 3366
	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
3367

3368
			if (brport_idx && (dev->ifindex != brport_idx))
3369 3370
				continue;

3371 3372 3373 3374 3375 3376 3377 3378 3379
			if (!br_idx) { /* user did not specify a specific bridge */
				if (dev->priv_flags & IFF_BRIDGE_PORT) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				if (dev != br_dev &&
				    !(dev->priv_flags & IFF_BRIDGE_PORT))
					continue;
3380

3381 3382 3383 3384 3385
				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !(dev->priv_flags & IFF_EBRIDGE))
					continue;
				cops = ops;
			}
3386

3387 3388
			if (idx < s_idx)
				goto cont;
3389

3390 3391 3392 3393 3394 3395 3396 3397 3398
			if (dev->priv_flags & IFF_BRIDGE_PORT) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								br_dev, dev,
								&fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}
3399

3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417
			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
3418 3419
	}

3420 3421 3422 3423 3424
out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

3425 3426 3427
	return skb->len;
}

3428 3429 3430 3431 3432 3433 3434 3435
/* Emit a u8 bridge-port attribute for @flag, but only when the caller's
 * @mask selects it; the value is 1 when @flag is set in @flags, else 0.
 */
static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
			       unsigned int attrnum, unsigned int flag)
{
	if (mask & flag)
		return nla_put_u8(skb, attrnum, !!(flags & flag));
	return 0;
}

3436
/**
 * ndo_dflt_bridge_getlink - default RTM_GETLINK (AF_BRIDGE) fill helper
 * @skb: message buffer being filled
 * @pid: destination netlink port id
 * @seq: netlink sequence number
 * @dev: bridge port device being described
 * @mode: bridge mode to report, or BRIDGE_MODE_UNDEF to omit
 * @flags: bridge port flag values to report
 * @mask: which bridge port flags to report (see brport_nla_put_flag())
 * @nlflags: netlink message flags (e.g. NLM_F_MULTI)
 * @filter_mask: passed through to @vlan_fill
 * @vlan_fill: optional callback emitting vlan info into IFLA_AF_SPEC
 *
 * Builds one RTM_NEWLINK message describing @dev's bridge state:
 * basic link attributes, an IFLA_AF_SPEC nest (BRIDGE_FLAGS_SELF,
 * optional bridge mode, optional vlan info) and an IFLA_PROTINFO nest
 * with the selected port flags.
 *
 * Returns 0 on success, -EMSGSIZE when @skb is full, or the error
 * from @vlan_fill.
 */
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;


	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
	if (!protinfo)
		goto nla_put_failure;

	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
3534

J
John Fastabend 已提交
3535 3536 3537 3538 3539 3540 3541
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = cb->nlh->nlmsg_seq;
3542
	u32 filter_mask = 0;
3543
	int err;
3544

3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556
	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
		struct nlattr *extfilt;

		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
					  IFLA_EXT_MASK);
		if (extfilt) {
			if (nla_len(extfilt) < sizeof(filter_mask))
				return -EINVAL;

			filter_mask = nla_get_u32(extfilt);
		}
	}
J
John Fastabend 已提交
3557 3558 3559 3560

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
3561
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
J
John Fastabend 已提交
3562

3563
		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3564 3565 3566 3567
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
3568 3569 3570 3571 3572 3573
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
3574
			}
3575
			idx++;
J
John Fastabend 已提交
3576 3577 3578
		}

		if (ops->ndo_bridge_getlink) {
3579 3580 3581 3582 3583
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
3584 3585 3586 3587 3588 3589
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
3590
			}
3591
			idx++;
J
John Fastabend 已提交
3592 3593
		}
	}
3594 3595
	err = skb->len;
out_err:
J
John Fastabend 已提交
3596 3597 3598
	rcu_read_unlock();
	cb->args[0] = idx;

3599
	return err;
J
John Fastabend 已提交
3600 3601
}

3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616
/* Upper bound on the size of one bridge RTM_NEWLINK notification,
 * used to size the skb allocated in rtnl_bridge_notify().
 */
static inline size_t bridge_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
}

3617
static int rtnl_bridge_notify(struct net_device *dev)
3618 3619 3620 3621 3622
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -EOPNOTSUPP;

3623 3624 3625
	if (!dev->netdev_ops->ndo_bridge_getlink)
		return 0;

3626 3627 3628 3629 3630 3631
	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOMEM;
		goto errout;
	}

3632
	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3633 3634
	if (err < 0)
		goto errout;
3635

3636 3637 3638
	if (!skb->len)
		goto errout;

3639 3640 3641 3642 3643
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return 0;
errout:
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(skb);
3644 3645
	if (err)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3646 3647 3648
	return err;
}

3649 3650
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
J
John Fastabend 已提交
3651 3652 3653 3654
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
3655 3656
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
3657
	u16 flags = 0;
3658
	bool have_flags = false;
J
John Fastabend 已提交
3659 3660 3661 3662 3663 3664 3665 3666 3667 3668

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
3669
		NL_SET_ERR_MSG(extack, "unknown ifindex");
J
John Fastabend 已提交
3670 3671 3672
		return -ENODEV;
	}

3673 3674 3675 3676
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3677 3678 3679
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

3680
				have_flags = true;
3681 3682 3683 3684 3685 3686 3687
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3688 3689 3690
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
3691 3692 3693 3694
			err = -EOPNOTSUPP;
			goto out;
		}

3695
		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
J
John Fastabend 已提交
3696 3697
		if (err)
			goto out;
3698 3699

		flags &= ~BRIDGE_FLAGS_MASTER;
J
John Fastabend 已提交
3700 3701
	}

3702 3703 3704 3705
	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
3706 3707
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags);
3708
		if (!err) {
3709
			flags &= ~BRIDGE_FLAGS_SELF;
3710 3711 3712 3713 3714 3715

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
3716
	}
J
John Fastabend 已提交
3717

3718
	if (have_flags)
3719
		memcpy(nla_data(attr), &flags, sizeof(flags));
J
John Fastabend 已提交
3720 3721 3722 3723
out:
	return err;
}

3724 3725
static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
3726 3727 3728 3729 3730 3731
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
3732
	u16 flags = 0;
3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
3744
		NL_SET_ERR_MSG(extack, "unknown ifindex");
3745 3746 3747 3748 3749 3750 3751
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3752 3753 3754
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769
				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
			err = -EOPNOTSUPP;
			goto out;
		}

3770
		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
3771 3772 3773 3774 3775 3776 3777 3778 3779 3780
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_dellink)
			err = -EOPNOTSUPP;
		else
3781 3782
			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
								  flags);
3783

3784
		if (!err) {
3785
			flags &= ~BRIDGE_FLAGS_SELF;
3786 3787 3788 3789 3790 3791

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
3792 3793 3794 3795 3796 3797 3798 3799
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}

3800 3801 3802 3803 3804 3805
/* True when @attrid is selected by the userspace filter @mask and is
 * compatible with the dump resume position: either no attribute is in
 * progress (*idxattr == 0) or we are resuming exactly this attribute.
 */
static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
{
	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
	       (!idxattr || idxattr == attrid);
}

3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837
/* First valid IFLA_OFFLOAD_XSTATS_* attribute id (skips _UNSPEC). */
#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)

/* Payload size of a given IFLA_OFFLOAD_XSTATS_* attribute, or 0 for
 * unknown/unsupported attribute ids.
 */
static int rtnl_get_offload_stats_attr_size(int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return sizeof(struct rtnl_link_stats64);
	}

	return 0;
}

/* Fill the IFLA_STATS_LINK_OFFLOAD_XSTATS nest with every offload stat
 * the driver reports via ndo_has_offload_stats/ndo_get_offload_stats.
 * *prividx is the attribute id to resume from; on failure it is set to
 * the attribute that did not fit so the dump can continue there.
 * Returns 0 on success, -ENODATA when the device has no offload stats,
 * -EMSGSIZE when @skb is full, or a driver error.
 */
static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
				  int *prividx)
{
	struct nlattr *attr = NULL;
	int attr_id, size;
	void *attr_data;
	int err;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return -ENODATA;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (attr_id < *prividx)
			continue;

		size = rtnl_get_offload_stats_attr_size(attr_id);
		if (!size)
			continue;

		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;

		attr = nla_reserve_64bit(skb, attr_id, size,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		attr_data = nla_data(attr);
		memset(attr_data, 0, size);
		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
							     attr_data);
		if (err)
			goto get_offload_stats_failure;
	}

	if (!attr)
		return -ENODATA;

	*prividx = 0;
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
get_offload_stats_failure:
	*prividx = attr_id;
	return err;
}

/* Size needed in the stats message for all offload xstats the device
 * reports, including the enclosing nest header; 0 when the device has
 * no offload stats callbacks or reports none.
 */
static int rtnl_get_offload_stats_size(const struct net_device *dev)
{
	int nla_size = 0;
	int attr_id;
	int size;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return 0;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;
		size = rtnl_get_offload_stats_attr_size(attr_id);
		nla_size += nla_total_size_64bit(size);
	}

	/* account for the IFLA_STATS_LINK_OFFLOAD_XSTATS nest itself */
	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}

3891 3892
/* Build one RTM_NEWSTATS message for @dev containing the stat groups
 * selected by @filter_mask: 64-bit link stats, link xstats (own and
 * slave), offload xstats and per-AF stats.  *idxattr/*prividx carry
 * the resume position (outer attribute / position within it) across
 * dump passes.  Returns 0 or -EMSGSIZE; on -EMSGSIZE the partially
 * built message is kept only when progress was made in a multipart
 * dump, so the dump can resume rather than abort.
 */
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->family = PF_UNSPEC;
	ifsm->pad1 = 0;
	ifsm->pad2 = 0;
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr)
			goto nla_put_failure;

		err = rtnl_get_offload_stats(skb, dev, prividx);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
		if (!attr)
			goto nla_put_failure;

		list_for_each_entry(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;
				int err;

				af = nla_nest_start(skb, af_ops->family);
				if (!af)
					goto nla_put_failure;

				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA)
					nla_nest_cancel(skb, af);
				else if (err < 0)
					goto nla_put_failure;

				nla_nest_end(skb, af);
			}
		}

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}

/* Upper bound on the payload rtnl_fill_statsinfo() will emit for @dev
 * with the given @filter_mask, used to size the reply skb in
 * rtnl_stats_get().
 */
static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  u32 filter_mask)
{
	size_t size = 0;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
		size += rtnl_get_offload_stats_size(dev);

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
		struct rtnl_af_ops *af_ops;

		/* for IFLA_STATS_AF_SPEC */
		size += nla_total_size(0);

		list_for_each_entry(af_ops, &rtnl_af_ops, list) {
			if (af_ops->get_stats_af_size) {
				size += nla_total_size(
					af_ops->get_stats_af_size(dev));

				/* for AF_* */
				size += nla_total_size(0);
			}
		}
	}

	return size;
}

4096 4097
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
4098 4099 4100
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
4101 4102
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
4103 4104 4105 4106
	struct sk_buff *nskb;
	u32 filter_mask;
	int err;

4107 4108 4109
	if (nlmsg_len(nlh) < sizeof(*ifsm))
		return -EINVAL;

4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128
	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
4129
				  0, filter_mask, &idxattr, &prividx);
4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}

static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
4143
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
4144
	struct net *net = sock_net(skb->sk);
4145
	unsigned int flags = NLM_F_MULTI;
4146 4147
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
4148
	struct net_device *dev;
4149
	u32 filter_mask = 0;
4150
	int idx = 0;
4151 4152 4153

	s_h = cb->args[0];
	s_idx = cb->args[1];
4154 4155
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];
4156 4157 4158

	cb->seq = net->dev_base_seq;

4159 4160 4161
	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
		return -EINVAL;

4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175
	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
4176 4177
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
4178 4179 4180 4181 4182 4183 4184
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
4185 4186
			s_prividx = 0;
			s_idxattr = 0;
4187 4188 4189 4190 4191 4192
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
4193 4194
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
4195 4196 4197 4198 4199 4200
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

L
Linus Torvalds 已提交
4201 4202
/* Process one rtnetlink message. */

J
Johannes Berg 已提交
4203 4204
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
L
Linus Torvalds 已提交
4205
{
4206
	struct net *net = sock_net(skb->sk);
4207 4208
	struct rtnl_link *handlers;
	int err = -EOPNOTSUPP;
4209
	rtnl_doit_func doit;
4210
	unsigned int flags;
4211
	int kind;
L
Linus Torvalds 已提交
4212 4213 4214 4215 4216
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
4217
		return -EOPNOTSUPP;
L
Linus Torvalds 已提交
4218 4219 4220 4221

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
4222
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
L
Linus Torvalds 已提交
4223 4224
		return 0;

4225
	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
L
Linus Torvalds 已提交
4226 4227
	kind = type&3;

4228
	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4229
		return -EPERM;
L
Linus Torvalds 已提交
4230

4231
	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
4232 4233 4234 4235 4236 4237 4238 4239 4240
		family = PF_UNSPEC;

	rcu_read_lock();
	handlers = rcu_dereference(rtnl_msg_handlers[family]);
	if (!handlers) {
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

4241
	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
4242
		struct sock *rtnl;
4243
		rtnl_dumpit_func dumpit;
4244
		u16 min_dump_alloc = 0;
L
Linus Torvalds 已提交
4245

4246 4247 4248 4249 4250 4251
		dumpit = READ_ONCE(handlers[type].dumpit);
		if (!dumpit) {
			family = PF_UNSPEC;
			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
			if (!handlers)
				goto err_unlock;
4252

4253 4254 4255 4256
			dumpit = READ_ONCE(handlers[type].dumpit);
			if (!dumpit)
				goto err_unlock;
		}
4257

4258 4259
		refcount_inc(&rtnl_msg_handlers_ref[family]);

4260
		if (type == RTM_GETLINK - RTM_BASE)
4261
			min_dump_alloc = rtnl_calcit(skb, nlh);
4262

4263 4264
		rcu_read_unlock();

4265
		rtnl = net->rtnl;
4266 4267 4268 4269 4270 4271 4272
		{
			struct netlink_dump_control c = {
				.dump		= dumpit,
				.min_dump_alloc	= min_dump_alloc,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
		}
4273
		refcount_dec(&rtnl_msg_handlers_ref[family]);
4274
		return err;
L
Linus Torvalds 已提交
4275 4276
	}

4277 4278 4279 4280 4281 4282
	doit = READ_ONCE(handlers[type].doit);
	if (!doit) {
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293
	flags = READ_ONCE(handlers[type].flags);
	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		refcount_inc(&rtnl_msg_handlers_ref[family]);
		doit = READ_ONCE(handlers[type].doit);
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

4294
	rcu_read_unlock();
L
Linus Torvalds 已提交
4295

4296 4297 4298 4299 4300 4301 4302
	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
	if (handlers) {
		doit = READ_ONCE(handlers[type].doit);
		if (doit)
			err = doit(skb, nlh, extack);
	}
4303 4304 4305 4306
	rtnl_unlock();
	return err;

err_unlock:
4307
	rcu_read_unlock();
4308
	return -EOPNOTSUPP;
L
Linus Torvalds 已提交
4309 4310
}

4311
static void rtnetlink_rcv(struct sk_buff *skb)
L
Linus Torvalds 已提交
4312
{
4313
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
L
Linus Torvalds 已提交
4314 4315
}

4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327
static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

L
Linus Torvalds 已提交
4328 4329
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
4330
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4331

L
Linus Torvalds 已提交
4332
	switch (event) {
4333
	case NETDEV_REBOOT:
4334
	case NETDEV_CHANGEADDR:
4335 4336 4337 4338 4339 4340
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
4341
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4342
				   GFP_KERNEL, NULL);
L
Linus Torvalds 已提交
4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/* Registered in rtnetlink_init() to receive netdevice events. */
static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call	= rtnetlink_event,
};

4354

4355
static int __net_init rtnetlink_net_init(struct net *net)
4356 4357
{
	struct sock *sk;
4358 4359 4360 4361
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
4362
		.flags		= NL_CFG_F_NONROOT_RECV,
4363
		.bind		= rtnetlink_bind,
4364 4365
	};

4366
	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4367 4368 4369 4370 4371 4372
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

4373
static void __net_exit rtnetlink_net_exit(struct net *net)
4374
{
4375 4376
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
4377 4378 4379 4380 4381 4382 4383
}

/* Per-network-namespace lifecycle hooks for the rtnetlink socket. */
static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};

L
Linus Torvalds 已提交
4384 4385
void __init rtnetlink_init(void)
{
4386 4387 4388 4389 4390
	int i;

	for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
		refcount_set(&rtnl_msg_handlers_ref[i], 1);

4391
	if (register_pernet_subsys(&rtnetlink_net_ops))
L
Linus Torvalds 已提交
4392
		panic("rtnetlink_init: cannot initialize rtnetlink\n");
4393

L
Linus Torvalds 已提交
4394
	register_netdevice_notifier(&rtnetlink_dev_notifier);
4395

4396
	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4397 4398 4399 4400
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
4401

4402 4403 4404
	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
4405

4406 4407 4408
	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
J
John Fastabend 已提交
4409

4410 4411 4412
	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
4413 4414

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
4415
		      0);
L
Linus Torvalds 已提交
4416
}