// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
A
Andrew Lunn 已提交
28
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29
{
30
	return dsa_tree_notify(dp->ds->dst, e, v);
31 32
}

/* Program @state (a BR_STATE_* constant) into the hardware and cache it in
 * @dp->stp_state. Returns -EOPNOTSUPP if the driver cannot offload STP
 * states.
 */
int dsa_port_set_state(struct dsa_port *dp, u8 state)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (ds->ops->port_fast_age) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			ds->ops->port_fast_age(ds, port);
	}

	/* Cache the new state so the transition above can be detected on the
	 * next call.
	 */
	dp->stp_state = state;

	return 0;
}

62
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
63 64 65
{
	int err;

66
	err = dsa_port_set_state(dp, state);
67 68 69
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}
70

/* Enable a port: run the driver's port_enable() hook, force standalone
 * ports into FORWARDING, and start phylink. The "_rt" suffix means the
 * caller already holds rtnl (see dsa_port_enable() for the locked wrapper).
 */
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	/* A bridged port has its STP state managed by the bridge layer; only
	 * standalone ports are forced into FORWARDING here.
	 */
	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

/* rtnl-locked wrapper around dsa_port_enable_rt(). */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int ret;

	rtnl_lock();
	ret = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return ret;
}

/* Mirror of dsa_port_enable_rt(): stop phylink, put standalone ports in
 * DISABLED, then run the driver's port_disable() hook. Caller holds rtnl.
 */
void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	/* Bridged ports have their STP state managed by the bridge layer */
	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

/* rtnl-locked wrapper around dsa_port_disable_rt(). */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

/* Offload to hardware, one flag at a time, the bridge port flags that the
 * bridge layer currently has set on @dp's bridge port. -EOPNOTSUPP from
 * the driver is tolerated (flag simply stays in software).
 */
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}


/* Return the port to its standalone defaults: learning off, all flooding
 * on. Errors other than -EOPNOTSUPP are only logged because a bridge
 * leave cannot be unwound.
 */
static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	/* val deliberately omits BR_LEARNING: standalone ports must not learn */
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

/* Replay into hardware everything the bridge layer already knows about
 * this port: brport flags, STP state, VLAN filtering, multicast router,
 * ageing time, and the MDB/FDB/VLAN databases. Called when the port
 * starts offloading a bridge. -EOPNOTSUPP from any step is tolerated.
 */
static int dsa_port_switchdev_sync(struct dsa_port *dp,
				   struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev));
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* The multicast router attribute is a property of the CPU port */
	err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_mdb_replay(br, brport_dev,
			    &dsa_slave_switchdev_blocking_notifier,
			    extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_fdb_replay(br, brport_dev, &dsa_slave_switchdev_notifier);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_vlan_replay(br, brport_dev,
			     &dsa_slave_switchdev_blocking_notifier,
			     extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}


/* Inverse of dsa_port_switchdev_sync(): restore standalone defaults when
 * the port stops offloading a bridge.
 */
static void dsa_port_switchdev_unsync(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Some drivers treat the notification for having a local multicast
	 * router by allowing multicast to be flooded to the CPU, so we should
	 * allow this in standalone mode too.
	 */
	dsa_port_mrouter(dp->cpu_dp, true, NULL);

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}


/* Offload joining bridge @br: notify the whole fabric, then replay the
 * bridge's current configuration into hardware. On failure the notifier
 * and dp->bridge_dev are rolled back in reverse order.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	err = dsa_port_switchdev_sync(dp, extack);
	if (err)
		goto out_rollback_unbridge;

	return 0;

out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dp->bridge_dev = NULL;
	return err;
}


/* Offload leaving bridge @br: notify the fabric, then restore the port's
 * standalone configuration. Errors are only logged since there is nothing
 * to unwind at this point.
 */
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	dsa_port_switchdev_unsync(dp);
}

/* React to a lower-state change of a LAG this port is a member of, and
 * tell the fabric whether the port should be part of the LAG's tx set.
 */
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

/* Offload joining LAG @lag. If the LAG is itself enslaved to a bridge,
 * the port joins that bridge too. Rolled back in reverse order on error.
 */
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	/* Nothing more to do unless the LAG is under a bridge */
	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}


/* Offload leaving LAG @lag, leaving any bridge the LAG was under first.
 * Errors from the notifier are only logged.
 */
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}


/* Decide whether the requested vlan_filtering state can be programmed.
 * Must be called under rcu_read_lock() (walks the upper device list).
 * Returns false, with an extack message, when the change would leave the
 * switch in an inconsistent state.
 */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}


/* Change the VLAN awareness of @dp, after validating that the change is
 * consistent with 8021q uppers and (for global-filtering switches) with
 * the other bridges on the chip.
 */
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	/* No-op if the requested state is already programmed */
	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	/* Cache at switch or port scope, matching the hardware granularity */
	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}

501

502 503 504 505 506 507 508 509 510 511 512 513 514 515
/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}

516
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
517 518 519
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
520 521 522 523
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;
524

525 526 527
	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;
528 529 530

	dp->ageing_time = ageing_time;

531
	return 0;
532
}
V
Vivien Didelot 已提交
533

534
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
535 536
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
537 538 539
{
	struct dsa_switch *ds = dp->ds;

540
	if (!ds->ops->port_pre_bridge_flags)
541 542
		return -EINVAL;

543
	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
544 545
}

546
int dsa_port_bridge_flags(const struct dsa_port *dp,
547 548
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
549 550 551
{
	struct dsa_switch *ds = dp->ds;

552
	if (!ds->ops->port_bridge_flags)
553
		return -EOPNOTSUPP;
554

555
	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
556 557
}

558 559
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
		     struct netlink_ext_ack *extack)
560 561 562
{
	struct dsa_switch *ds = dp->ds;

563
	if (!ds->ops->port_set_mrouter)
564
		return -EOPNOTSUPP;
565

566
	return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
567 568
}

569 570 571 572 573 574 575 576 577 578 579 580 581
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool propagate_upstream)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.propagate_upstream = propagate_upstream,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

582 583
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
V
Vivien Didelot 已提交
584
{
V
Vivien Didelot 已提交
585 586 587
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
588 589
		.addr = addr,
		.vid = vid,
V
Vivien Didelot 已提交
590
	};
V
Vivien Didelot 已提交
591

V
Vivien Didelot 已提交
592
	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
V
Vivien Didelot 已提交
593 594
}

595 596
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
V
Vivien Didelot 已提交
597
{
V
Vivien Didelot 已提交
598 599 600
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
601 602 603
		.addr = addr,
		.vid = vid,

V
Vivien Didelot 已提交
604
	};
V
Vivien Didelot 已提交
605

V
Vivien Didelot 已提交
606
	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
V
Vivien Didelot 已提交
607 608
}

V
Vivien Didelot 已提交
609 610 611 612 613 614 615 616 617 618 619
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

A
Andrew Lunn 已提交
620
int dsa_port_mdb_add(const struct dsa_port *dp,
621
		     const struct switchdev_obj_port_mdb *mdb)
V
Vivien Didelot 已提交
622
{
V
Vivien Didelot 已提交
623 624 625 626 627
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
V
Vivien Didelot 已提交
628

V
Vivien Didelot 已提交
629
	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
V
Vivien Didelot 已提交
630 631
}

A
Andrew Lunn 已提交
632
int dsa_port_mdb_del(const struct dsa_port *dp,
V
Vivien Didelot 已提交
633 634
		     const struct switchdev_obj_port_mdb *mdb)
{
V
Vivien Didelot 已提交
635 636 637 638 639
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
V
Vivien Didelot 已提交
640

V
Vivien Didelot 已提交
641
	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
V
Vivien Didelot 已提交
642 643
}

V
Vivien Didelot 已提交
644
int dsa_port_vlan_add(struct dsa_port *dp,
645 646
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
V
Vivien Didelot 已提交
647
{
V
Vivien Didelot 已提交
648 649 650 651
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
652
		.extack = extack,
V
Vivien Didelot 已提交
653
	};
V
Vivien Didelot 已提交
654

655
	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
V
Vivien Didelot 已提交
656 657 658 659 660
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
V
Vivien Didelot 已提交
661 662 663 664 665
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};
V
Vivien Didelot 已提交
666

667
	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
V
Vivien Didelot 已提交
668
}
669

H
Horatiu Vultur 已提交
670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

/* Offload deletion of an MRP instance on this port. */
int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.mrp = mrp,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}


/* Offload assignment of an MRP ring role to this port. */
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.mrp = mrp,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}


/* Offload removal of an MRP ring role from this port. */
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.mrp = mrp,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}


718 719 720 721 722 723 724 725
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->filter = tag_ops->filter;
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

/* Resolve this port's "phy-handle" device tree phandle to a phy_device.
 * Returns NULL if the property is absent, ERR_PTR(-EPROBE_DEFER) if the
 * PHY has not been registered yet. On success the caller holds a device
 * reference and must put_device() it when done.
 */
static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}


745 746 747
static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
748 749 750 751 752 753 754 755 756 757
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

/* phylink callback: read the current inband link state from the driver.
 * Reports link down if the driver has no hook or the read fails.
 */
static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}


779 780 781
static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
782 783 784 785 786 787 788 789 790 791
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

792
static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
793 794 795 796 797 798 799 800 801 802
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

/* phylink callback: take the MAC side of the link down. Falls back to the
 * legacy adjust_link() hook for drivers not converted to phylink.
 */
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	/* Only user ports have a slave netdev (and thus an attached PHY) */
	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}


/* phylink callback: bring the MAC side of the link up with the resolved
 * parameters. Falls back to the legacy adjust_link() hook for drivers not
 * converted to phylink.
 */
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}


/* MAC operations handed to phylink_create() for every DSA port */
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};


/* Legacy (non-phylink) path: resume or suspend the port's PHY described by
 * "phy-handle" and run adjust_link(). Returns 0 if the port has no PHY.
 */
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	/* Drop the reference taken by dsa_port_get_phy_device() */
	put_device(&phydev->mdio.dev);
	return err;
}


static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
891 892 893 894 895
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
896
	phy_interface_t mode;
897 898
	int err;

899 900 901 902 903 904 905
	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}
906

907
	phydev = of_phy_find_device(dn);
908

909 910
	err = of_get_phy_mode(dn, &mode);
	if (err)
911 912
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;
913

914
	genphy_read_status(phydev);
915

916 917
	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);
918

919
	put_device(&phydev->mdio.dev);
920 921 922 923

	return 0;
}

/* Create a phylink instance for a CPU/DSA port and connect it to the PHY
 * (if any) described in the port's device tree node. -ENODEV from the
 * connect step is tolerated: fixed links have no PHY to attach.
 */
static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}


959
int dsa_port_link_register_of(struct dsa_port *dp)
960
{
961
	struct dsa_switch *ds = dp->ds;
962
	struct device_node *phy_np;
963
	int port = dp->index;
964

965 966
	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
967 968 969 970
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
971
			return dsa_port_phylink_register(dp);
972
		}
973 974
		return 0;
	}
975 976 977 978

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

979 980 981 982 983
	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

/* Tear down whatever dsa_port_link_register_of() set up: phylink for
 * converted drivers, fixed-link/PHY state otherwise.
 */
void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

/* ethtool helper: fetch the statistics values of the port's PHY.
 * Fixed links and ports without a PHY report -EOPNOTSUPP.
 */
int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret;

	if (of_phy_is_fixed_link(dp->dn))
		return -EOPNOTSUPP;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return -EOPNOTSUPP;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);


/* ethtool helper: number of statistics exposed by the port's PHY.
 * Fixed links and ports without a PHY report -EOPNOTSUPP.
 */
int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret;

	if (of_phy_is_fixed_link(dp->dn))
		return -EOPNOTSUPP;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return -EOPNOTSUPP;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

/* Offload joining HSR/PRP device @hsr; dp->hsr_dev is cleared again if
 * the fabric notifier fails.
 */
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.hsr = hsr,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	int ret;

	dp->hsr_dev = hsr;

	ret = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (ret)
		dp->hsr_dev = NULL;

	return ret;
}


/* Offload leaving HSR/PRP device @hsr; notifier errors are only logged. */
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.hsr = hsr,
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	int ret;

	dp->hsr_dev = NULL;

	ret = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (ret)
		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}