// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only other way of traversing the tree is
 * through its ports list, which does not uniquely list the member switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
 * @dst: Tree in which to record the mapping.
 * @lag: Netdev that is to be mapped to an ID.
 *
 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
	unsigned int id;

	if (dsa_lag_id(dst, lag) >= 0)
		/* Already mapped */
		return;

	for (id = 0; id < dst->lags_len; id++) {
		if (!dsa_lag_dev(dst, id)) {
			dst->lags[id] = lag;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: Netdev that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
	struct dsa_port *dp;
	unsigned int id;

	dsa_lag_foreach_port(dp, dst, lag)
		/* There are remaining users of this mapping */
		return;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_dev(dst, id) == lag) {
			dst->lags[id] = NULL;
			break;
		}
	}
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	/* When preparing the offload for a port, it will have a valid
	 * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
	 * However there might be other ports having the same dp->bridge_dev
	 * and a valid dp->bridge_num, so just ignore this port.
	 */
	list_for_each_entry(dst, &dsa_tree_list, list)
		list_for_each_entry(dp, &dst->ports, list)
			if (dp->bridge_dev == bridge_dev &&
			    dp->bridge_num != -1)
				return dp->bridge_num;

	return -1;
}

int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	int bridge_num = dsa_bridge_num_find(bridge_dev);

	if (bridge_num < 0) {
		/* First port that offloads TX forwarding for this bridge */
		bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
						 DSA_MAX_NUM_OFFLOADING_BRIDGES);
		if (bridge_num >= max)
			return -1;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
{
	/* Check if the bridge is still in use, otherwise it is time
	 * to clean it up so we can reuse this bridge_num later.
	 */
	if (dsa_bridge_num_find(bridge_dev) < 0)
		clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

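/* Look up a switch in the fabric by the (tree index, switch index) pair it
 * was registered under, as given by the "dsa,member" property.
 */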
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

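/* Find-or-create helper: take a reference on the tree with this index, or
 * allocate a new one if it does not exist yet.
 */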
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

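/* Find or allocate the dsa_link through which @dp can reach @link_dp, and
 * record it in the tree's routing table.
 */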
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

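/* Resolve the "link" phandles of a DSA port into dsa_link entries. Returns
 * false if a linked port has not been enumerated yet or a link cannot be
 * allocated, in which case the routing table is incomplete and tree setup
 * is deferred until all members have probed.
 */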
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

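/* Return the first CPU port in the tree's port list, used as the default
 * CPU port for ports whose own switch has no local one.
 */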
static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			/* Prefer a local CPU port */
			if (dp->ds != cpu_dp->ds)
				continue;

			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

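/* Late setup of a single port, after the switch driver's ->setup() has run:
 * register links and enable shared (CPU/DSA) ports, or create the slave
 * net_device for user ports. Undone by dsa_port_teardown().
 */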
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}

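/* Register a devlink port for @dp, with a flavour matching the DSA port
 * type. Called before the switch driver's ->setup(), so that the driver can
 * register port regions against it.
 */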
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has been already
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int port, err;

	if (tag_ops->proto == dst->default_proto)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

	return 0;
}

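/* One-time setup of a member switch: devlink registration, devlink ports,
 * the switch notifier, the driver's ->setup(), tag protocol synchronization
 * and, optionally, the slave MII bus.
 */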
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = devlink_register(ds->devlink);
	if (err)
		goto free_devlink;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds == ds) {
			err = dsa_port_devlink_setup(dp);
			if (err)
				goto unregister_devlink_ports;
		}
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	devlink_params_publish(ds->devlink);

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;

	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	list_for_each_entry(dp, &ds->dst->ports, list)
		if (dp->ds == ds)
			dsa_port_devlink_teardown(dp);
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_unregister_notifier(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	if (ds->devlink) {
		list_for_each_entry(dp, &ds->dst->ports, list)
			if (dp->ds == ds)
				dsa_port_devlink_teardown(dp);
		devlink_unregister(ds->devlink);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

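/* Set up all member switches first, then all ports. A port that fails to
 * set up is reinitialized with the UNUSED flavour rather than failing the
 * whole tree.
 */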
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err) {
			err = dsa_port_reinit_as_unused(dp);
			if (err)
				goto teardown;
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			err = dsa_master_setup(dp->master, dp);
			if (err)
				return err;
		}
	}

	return 0;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			dsa_master_teardown(dp->master);
}

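/* Size the tree-wide LAG array after the largest number of LAG IDs
 * advertised by any member switch.
 */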
static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

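/* Bring up an entire switch tree: routing table, CPU port assignment,
 * member switches, DSA masters and LAG bookkeeping, unwinding in reverse
 * order on failure. A tree with an incomplete routing table is left alone
 * until its last member switch probes.
 */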
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_ports(dst);
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_is_user_port(dp->ds, dp->index))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	dst->tag_ops = tag_ops;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

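/* Find the port with this index on @ds, or allocate one and link it into
 * the tree's port list.
 */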
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;
	dp->bridge_num = -1;

	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop the reference the iterator holds on @port
			 * before breaking out of the loop.
			 */
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

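/* "dsa,member" is a pair of cells: the tree index followed by the switch
 * index within that tree. For example, a switch node carrying
 * "dsa,member = <0 1>;" is switch 1 of tree 0. Absent the property, the
 * switch defaults to member 0 of tree 0.
 */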
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp, *next;

	list_for_each_entry_safe(dp, next, &dst->ports, list) {
		if (dp->ds != ds)
			continue;
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
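/* A minimal sketch of how a switch driver might register itself, with
 * hypothetical names (foo_probe, foo_switch_ops) used purely for
 * illustration:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &pdev->dev;
 *		ds->num_ports = 8;
 *		ds->ops = &foo_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 *
 * ds->dev and ds->num_ports must be valid before the call, since
 * dsa_switch_probe() rejects a NULL ds->dev or zero ds->num_ports.
 */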

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	LIST_HEAD(unregister_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);
	rtnl_lock();

	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds != ds)
			continue;

		if (!dsa_port_is_user(dp))
			continue;

		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
		/* Just unlinking ourselves as uppers of the master is not
		 * sufficient. When the master net device unregisters, that will
		 * also call dev_close, which we will catch as NETDEV_GOING_DOWN
		 * and trigger a dev_close on our own devices (dsa_slave_close).
		 * In turn, that will call dev_mc_unsync on the master's net
		 * device. If the master is also a DSA switch port, this will
		 * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
		 * its own master. Lockdep will complain about the fact that
		 * all cascaded masters have the same dsa_master_addr_list_lock_key,
		 * which it normally would not do if the cascaded masters would
		 * be in a proper upper/lower relationship, which we've just
		 * destroyed.
		 * To suppress the lockdep warnings, let's actually unregister
		 * the DSA slave interfaces too, to avoid the nonsensical
		 * multicast address list synchronization on shutdown.
		 */
		unregister_netdevice_queue(slave_dev, &unregister_list);
	}
	unregister_netdevice_many(&unregister_list);

	rtnl_unlock();
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);