/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

static bool dsa_slave_dev_check(struct net_device *dev);

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

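/* Bringing a slave up means making sure its traffic can flow through the
 * master (CPU) interface: mirror the slave's MAC address, allmulti and
 * promiscuity settings onto the master before enabling the switch port and
 * starting the PHY.
 */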
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	if (dev->phydev)
		phy_start(dev->phydev);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dsa_port_disable(dp, dev->phydev);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

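/* .ndo_fdb_dump support: walk the hardware FDB via the driver's
 * port_fdb_dump() callback and emit one RTM_NEWNEIGH message per entry,
 * resuming from the index stashed in cb->args[2].
 */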
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
					trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB del,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_attr_get(struct net_device *dev,
				   struct switchdev_attr *attr)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(dst->index);
		memcpy(&attr->u.ppid.id, &dst->index, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}

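/* If the packet is a PTP frame and the switch supports TX timestamping, hand
 * a clone of the skb to the driver. The driver keeps the clone when it
 * accepts it for timestamping; otherwise the clone is freed here.
 */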
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
		return;

	kfree_skb(clone);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* SKB for netpoll still needs to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, nskb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	nskb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static u32 dsa_slave_get_link(struct net_device *dev)
{
	if (!dev->phydev)
		return -ENODEV;

	genphy_update_link(dev->phydev);

	return dev->phydev->link;
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds, dp->index, sset);

		return count;
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	if (e->eee_enabled) {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret)
			return ret;
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phy_ethtool_get_eee(dev->phydev, e);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free_async(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}

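/* tc matchall offload: a matchall classifier whose single action is a mirred
 * egress mirror towards another DSA slave is translated into a hardware port
 * mirror through ds->ops->port_mirror_add() and undone with
 * ds->ops->port_mirror_del().
 */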
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct dsa_switch *ds = dp->ds;
	struct net_device *to_dev;
	const struct tc_action *a;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;
	LIST_HEAD(actions);

	if (!ds->ops->port_mirror_add)
		return err;

	if (!tcf_exts_has_one_action(cls->exts))
		return err;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		to_dev = tcf_mirred_dev(a);
		if (!to_dev)
			return -EINVAL;

		if (!dsa_slave_dev_check(to_dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(to_dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return 0;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

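/* tc offload entry points: separate block callbacks are registered for the
 * clsact ingress and egress blocks, so the matchall handler above knows which
 * direction it is programming.
 */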
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cb, dev, dev);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cb, dev);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= dsa_slave_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};

/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
};

static const struct switchdev_ops dsa_slave_switchdev_ops = {
	.switchdev_port_attr_get	= dsa_slave_port_attr_get,
	.switchdev_port_attr_set	= dsa_slave_port_attr_set,
	.switchdev_port_obj_add		= dsa_slave_port_obj_add,
	.switchdev_port_obj_del		= dsa_slave_port_obj_del,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};

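/* PHYLIB link change callback: cache link, duplex and pause state so that
 * ds->ops->adjust_link() and phy_print_status() only run when something
 * actually changed.
 */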
static void dsa_slave_adjust_link(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned int status_changed = 0;

	if (p->old_link != dev->phydev->link) {
		status_changed = 1;
		p->old_link = dev->phydev->link;
	}

	if (p->old_duplex != dev->phydev->duplex) {
		status_changed = 1;
		p->old_duplex = dev->phydev->duplex;
	}

	if (p->old_pause != dev->phydev->pause) {
		status_changed = 1;
		p->old_pause = dev->phydev->pause;
	}

	if (ds->ops->adjust_link && status_changed)
		ds->ops->adjust_link(ds, dp->index, dev->phydev);

	if (status_changed)
		phy_print_status(dev->phydev);
}

static int dsa_slave_fixed_link_update(struct net_device *dev,
				       struct fixed_phy_status *status)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;

	if (dev) {
		dp = dsa_slave_to_port(dev);
		ds = dp->ds;
		if (ds->ops->fixed_link_update)
			ds->ops->fixed_link_update(ds, dp->index, status);
	}

	return 0;
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	/* Use already configured phy mode */
	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
		p->phy_interface = slave_dev->phydev->interface;

	return phy_connect_direct(slave_dev, slave_dev->phydev,
				  dsa_slave_adjust_link, p->phy_interface);
}

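/* Pick a PHY for the slave, in order of preference: a "phy-handle" from the
 * port's device tree node, a fixed-link node, and finally the switch's
 * internal MDIO bus at the port index.
 */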
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_dn;
	bool phy_is_fixed = false;
	u32 phy_flags = 0;
	int mode, ret;

	mode = of_get_phy_mode(port_dn);
	if (mode < 0)
		mode = PHY_INTERFACE_MODE_NA;
	p->phy_interface = mode;

	phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
	if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the fixed PHY is the Port DT node
		 */
		ret = of_phy_register_fixed_link(port_dn);
		if (ret) {
			netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret);
			return ret;
		}
		phy_is_fixed = true;
		phy_dn = of_node_get(port_dn);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	if (phy_dn) {
		slave_dev->phydev = of_phy_connect(slave_dev, phy_dn,
						   dsa_slave_adjust_link,
						   phy_flags,
						   p->phy_interface);
		of_node_put(phy_dn);
	}

	if (slave_dev->phydev && phy_is_fixed)
		fixed_phy_set_link_update(slave_dev->phydev,
					  dsa_slave_fixed_link_update);

	/* We could not connect to a designated PHY, so use the switch internal
	 * MDIO bus instead
	 */
	if (!slave_dev->phydev) {
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev, "failed to connect to port %d: %d\n",
				   dp->index, ret);
			if (phy_is_fixed)
				of_phy_deregister_fixed_link(port_dn);
			return ret;
		}
	}

	phy_attached_info(slave_dev->phydev);

	return 0;
}

static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_device_detach(slave_dev);

	if (slave_dev->phydev) {
		phy_stop(slave_dev->phydev);
		p->old_pause = -1;
		p->old_link = -1;
		p->old_duplex = -1;
		phy_suspend(slave_dev->phydev);
	}

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	netif_device_attach(slave_dev);

	if (slave_dev->phydev) {
		phy_resume(slave_dev->phydev);
		phy_start(slave_dev->phydev);
	}

	return 0;
}

static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}

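/* Create and register the slave net_device for a user port: it inherits the
 * master's MAC address and vlan_features, gets one TX queue per hardware
 * queue advertised by the switch, and transmits through the tagging
 * protocol's xmit function.
 */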
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;

	p->old_pause = -1;
	p->old_link = -1;
	p->old_duplex = -1;

	port->slave = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	return 0;

out_phy:
	phy_disconnect(slave_dev->phydev);
	if (of_phy_is_fixed_link(port->dn))
		of_phy_deregister_fixed_link(port->dn);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct device_node *port_dn = dp->dn;

	netif_carrier_off(slave_dev);
	if (slave_dev->phydev) {
		phy_disconnect(slave_dev->phydev);

		if (of_phy_is_fixed_link(port_dn))
			of_phy_deregister_fixed_link(port_dn);
	}
	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	unregister_netdev(slave_dev);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}

static bool dsa_slave_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}

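/* NETDEV_CHANGEUPPER: when a slave is enslaved to or released from a bridge
 * master, program the switch port accordingly via dsa_port_bridge_join() and
 * dsa_port_bridge_leave().
 */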
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGEUPPER)
		return dsa_slave_changeupper(dev, ptr);

	return NOTIFY_DONE;
}

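/* SWITCHDEV_FDB_ADD/DEL_TO_DEVICE notifications arrive in atomic context, so
 * the FDB entry is copied into a work item and the actual dsa_port_fdb_add()
 * or dsa_port_fdb_del() call runs later from the DSA workqueue under RTNL.
 */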
struct dsa_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work,
						      ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

int dsa_slave_register_notifier(void)
{
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	return 0;

err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	int err;

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}