// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only alternative way to traverse the tree is
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}
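
/* Usage sketch (illustrative, not from this file): callers pair a
 * DSA_NOTIFIER_* event id with its event-specific info structure from
 * dsa_priv.h, e.g. for cross-chip bridge join:
 *
 *	struct dsa_notifier_bridge_info info = {
 *		.tree_index = dp->ds->dst->index,
 *		.sw_index = dp->ds->index,
 *		.port = dp->index,
 *		.br = br,
 *	};
 *
 *	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
 */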

/**
 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
 * @dst: Tree in which to record the mapping.
 * @lag: Netdev that is to be mapped to an ID.
 *
 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
	unsigned int id;

	if (dsa_lag_id(dst, lag) >= 0)
		/* Already mapped */
		return;

	for (id = 0; id < dst->lags_len; id++) {
		if (!dsa_lag_dev(dst, id)) {
			dst->lags[id] = lag;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: Netdev that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
	struct dsa_port *dp;
	unsigned int id;

	dsa_lag_foreach_port(dp, dst, lag)
		/* There are remaining users of this mapping */
		return;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_dev(dst, id) == lag) {
			dst->lags[id] = NULL;
			break;
		}
	}
}
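
/* Usage sketch (illustrative; the real callers live in port.c and in the
 * drivers): a port joining a LAG first maps the bonding/team netdev, and
 * the driver then translates it to a linear ID:
 *
 *	dsa_lag_map(dp->ds->dst, lag_dev);
 *	id = dsa_lag_id(dp->ds->dst, lag_dev);
 *
 * A negative id means no IDs were left; the driver returns -EOPNOTSUPP
 * and DSA falls back to a software LAG. dsa_lag_unmap() is called on
 * leave and only clears the slot once the last port has left the LAG.
 */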

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	/* When preparing the offload for a port, it will have a valid
	 * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
	 * However there might be other ports having the same dp->bridge_dev
	 * and a valid dp->bridge_num, so just ignore this port.
	 */
	list_for_each_entry(dst, &dsa_tree_list, list)
		list_for_each_entry(dp, &dst->ports, list)
			if (dp->bridge_dev == bridge_dev && dp->bridge_num)
				return dp->bridge_num;

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Check if the bridge is still in use, otherwise it is time
	 * to clean it up so we can reuse this bridge_num later.
	 */
	if (!dsa_bridge_num_find(bridge_dev))
		clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
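
/* Usage sketch (illustrative): the TX forwarding offload path pairs these
 * as get/put around the lifetime of the offload, with "max" standing in
 * for the driver's limit on the number of offloaded bridges:
 *
 *	bridge_num = dsa_bridge_num_get(bridge_dev, max);
 *	if (!bridge_num)
 *		return -EOPNOTSUPP;
 *	...
 *	dsa_bridge_num_put(bridge_dev, bridge_num);
 */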

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	mutex_init(&dp->addr_lists_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has been already
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
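
/* These hooks back the standard devlink commands; from userspace, e.g.
 * (the device handle is illustrative):
 *
 *	$ devlink dev info pci/0000:01:00.0
 *	$ devlink sb pool show pci/0000:01:00.0
 *	$ devlink sb occupancy snapshot pci/0000:01:00.0
 */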

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *cpu_dp;
	int err;

	if (tag_ops->proto == dst->default_proto)
		return 0;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

	return 0;
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		dsa_switch_for_each_port(dp, ds)
			dsa_port_devlink_teardown(dp);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err) {
			err = dsa_port_reinit_as_unused(dp);
			if (err)
				goto teardown;
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			err = dsa_master_setup(dp->master, dp);
			if (err)
				return err;
		}
	}

	return 0;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			dsa_master_teardown(dp->master);
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_ports(dst);
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_port_is_user(dp))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	dst->tag_ops = tag_ops;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
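
/* The change is driven from userspace through the per-master sysfs
 * attribute implemented in master.c; illustratively, with eth0 as the
 * DSA master and all ports down:
 *
 *	# ip link set eth0 down
 *	# echo ocelot > /sys/class/net/eth0/dsa/tagging
 */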

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
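
/* An illustrative device tree fragment matched by the parser above; the
 * labels and phandles are examples, the property names follow the DSA
 * bindings (an "ethernet" phandle marks a CPU port, a "link" phandle a
 * DSA link, anything else a user port):
 *
 *	ethernet-ports {
 *		port@0 {
 *			reg = <0>;
 *			label = "lan1";
 *		};
 *		port@5 {
 *			reg = <5>;
 *			ethernet = <&eth0>;
 *		};
 *		port@6 {
 *			reg = <6>;
 *			link = <&switch1port6>;
 *		};
 *	};
 */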

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
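
/* Multi-switch trees are described with the optional "dsa,member" property,
 * a <tree index, switch index> pair; e.g. (illustrative) the second switch
 * of tree 0:
 *
 *	dsa,member = <0 1>;
 */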

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
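
/* A minimal registration sketch from a hypothetical switch driver's probe
 * function (all foo_* names are illustrative):
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *
 *	ds->dev = dev;
 *	ds->num_ports = FOO_NUM_PORTS;
 *	ds->ops = &foo_switch_ops;
 *
 *	return dsa_register_switch(ds);
 */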

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	LIST_HEAD(unregister_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);
	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
		/* Just unlinking ourselves as uppers of the master is not
		 * sufficient. When the master net device unregisters, that will
		 * also call dev_close, which we will catch as NETDEV_GOING_DOWN
		 * and trigger a dev_close on our own devices (dsa_slave_close).
		 * In turn, that will call dev_mc_unsync on the master's net
		 * device. If the master is also a DSA switch port, this will
		 * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
		 * its own master. Lockdep will complain about the fact that
		 * all cascaded masters have the same dsa_master_addr_list_lock_key,
		 * which it normally would not do if the cascaded masters would
		 * be in a proper upper/lower relationship, which we've just
		 * destroyed.
		 * To suppress the lockdep warnings, let's actually unregister
		 * the DSA slave interfaces too, to avoid the nonsensical
		 * multicast address list synchronization on shutdown.
		 */
		unregister_netdevice_queue(slave_dev, &unregister_list);
	}
	unregister_netdevice_many(&unregister_list);

	rtnl_unlock();
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
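
/* A sketch of how a hypothetical platform driver would wire this into its
 * .shutdown callback (foo_* names are illustrative):
 *
 *	static void foo_shutdown(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = platform_get_drvdata(pdev);
 *
 *		if (!priv)
 *			return;
 *
 *		dsa_switch_shutdown(priv->ds);
 *		platform_set_drvdata(pdev, NULL);
 *	}
 */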