// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

23
void ksz_update_port_member(struct ksz_device *dev, int port)
24
{
25 26 27 28
	struct ksz_port *p = &dev->ports[port];
	struct dsa_switch *ds = dev->ds;
	u8 port_member = 0, cpu_port;
	const struct dsa_port *dp;
29
	int i, j;
30

31 32 33 34 35 36 37 38 39 40 41 42
	if (!dsa_is_user_port(ds, port))
		return;

	dp = dsa_to_port(ds, port);
	cpu_port = BIT(dsa_upstream_port(ds, port));

	for (i = 0; i < ds->num_ports; i++) {
		const struct dsa_port *other_dp = dsa_to_port(ds, i);
		struct ksz_port *other_p = &dev->ports[i];
		u8 val = 0;

		if (!dsa_is_user_port(ds, i))
43
			continue;
44 45
		if (port == i)
			continue;
46
		if (!dsa_port_bridge_same(dp, other_dp))
47
			continue;
48 49
		if (other_p->stp_state != BR_STATE_FORWARDING)
			continue;
50

51
		if (p->stp_state == BR_STATE_FORWARDING) {
52 53 54 55
			val |= BIT(port);
			port_member |= BIT(i);
		}

56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
		/* Retain port [i]'s relationship to other ports than [port] */
		for (j = 0; j < ds->num_ports; j++) {
			const struct dsa_port *third_dp;
			struct ksz_port *third_p;

			if (j == i)
				continue;
			if (j == port)
				continue;
			if (!dsa_is_user_port(ds, j))
				continue;
			third_p = &dev->ports[j];
			if (third_p->stp_state != BR_STATE_FORWARDING)
				continue;
			third_dp = dsa_to_port(ds, j);
			if (dsa_port_bridge_same(other_dp, third_dp))
				val |= BIT(j);
		}

75
		dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
76
	}
77 78

	dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
79
}
80
EXPORT_SYMBOL_GPL(ksz_update_port_member);
81

82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108
static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* last one in storage */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}

static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
109
					      mib_read.work);
110 111 112 113
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

114
	for (i = 0; i < dev->port_cnt; i++) {
115 116 117
		if (dsa_is_unused_port(dev->ds, i))
			continue;

118 119 120 121 122 123 124 125 126 127 128 129 130 131 132
		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read MIB counters when the port is told to do.
		 * If not, read only dropped counters when link is not up.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;
133 134 135 136

		if (dev->dev_ops->r_mib_stat64)
			dev->dev_ops->r_mib_stat64(dev, i);

137 138 139
		mutex_unlock(&mib->cnt_mutex);
	}

140
	schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
141 142 143 144 145 146
}

void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

147 148
	INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);

149
	for (i = 0; i < dev->port_cnt; i++)
150 151 152 153
		dev->dev_ops->port_init_cnt(dev, i);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

154
int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
155 156
{
	struct ksz_device *dev = ds->priv;
157
	u16 val = 0xffff;
158

159
	dev->dev_ops->r_phy(dev, addr, reg, &val);
160 161 162

	return val;
}
163
EXPORT_SYMBOL_GPL(ksz_phy_read16);
164

165
int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
166 167 168
{
	struct ksz_device *dev = ds->priv;

169
	dev->dev_ops->w_phy(dev, addr, reg, val);
170 171 172

	return 0;
}
173
EXPORT_SYMBOL_GPL(ksz_phy_write16);
174

175 176
void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
		       phy_interface_t interface)
177 178 179 180 181
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
182
	p->read = true;
183 184 185
	/* timer started */
	if (dev->mib_read_interval)
		schedule_delayed_work(&dev->mib_read, 0);
186 187 188
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);

189
int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
190 191 192
{
	struct ksz_device *dev = ds->priv;

193 194 195
	if (sset != ETH_SS_STATS)
		return 0;

196
	return dev->mib_cnt;
197
}
198
EXPORT_SYMBOL_GPL(ksz_sset_count);
199

200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

218
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
219
			 struct dsa_bridge bridge,
220 221
			 bool *tx_fwd_offload,
			 struct netlink_ext_ack *extack)
222
{
223 224 225
	/* port_stp_state_set() will be called after to put the port in
	 * appropriate state so there is no need to do anything.
	 */
226

227
	return 0;
228
}
229
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
230

231
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
232
			   struct dsa_bridge bridge)
233
{
234 235 236
	/* port_stp_state_set() will be called after to put the port in
	 * forwarding state so there is no need to do anything.
	 */
237
}
238
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
239

240
void ksz_port_fast_age(struct dsa_switch *ds, int port)
241 242 243
{
	struct ksz_device *dev = ds->priv;

244
	dev->dev_ops->flush_dyn_mac_table(dev, port);
245
}
246
EXPORT_SYMBOL_GPL(ksz_port_fast_age);
247

248 249
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
250 251
{
	struct ksz_device *dev = ds->priv;
252
	int ret = 0;
253 254 255 256 257
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
258 259 260
	struct alu_struct alu;

	do {
261 262 263 264 265
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
266
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
267
			if (ret)
268
				break;
269
		}
270 271 272 273
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;
274 275 276

	return ret;
}
277
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
278

/* DSA .port_mdb_add callback: add @port to the static MAC table entry
 * for the multicast group @mdb->addr (keyed by address + FID).
 *
 * Scans the whole static table: if a matching entry exists, just OR in
 * the port bit; otherwise reuse the first empty slot found during the
 * scan.  Returns -ENOSPC when the table is full and no match exists.
 */
int ksz_port_mdb_add(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb,
		     struct dsa_db db)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;	/* 1-based index of first free slot; 0 = none found */

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;	/* +1 so slot 0 is distinguishable */
		}
	}

	/* no available entry */
	if (index == dev->num_statics && !empty)
		return -ENOSPC;

	/* add entry: no existing match, so initialize the free slot */
	if (index == dev->num_statics) {
		index = empty - 1;	/* undo the +1 offset */
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

/* DSA .port_mdb_del callback: remove @port from the static MAC table
 * entry for the multicast group @mdb->addr (keyed by address + FID).
 *
 * If the entry is not found, this silently succeeds (nothing to
 * delete).  When the last port is removed the entry is invalidated by
 * clearing its static flag before write-back.
 */
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb,
		     struct dsa_db db)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* no available entry */
	if (index == dev->num_statics)
		goto exit;

	/* clear port; dropping the last member invalidates the entry */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

357
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
358 359 360
{
	struct ksz_device *dev = ds->priv;

361 362 363
	if (!dsa_is_user_port(ds, port))
		return 0;

364 365
	/* setup slave port */
	dev->dev_ops->port_setup(dev, port, false);
366

367 368 369
	/* port_stp_state_set() will be called after to enable the port so
	 * there is no need to do anything.
	 */
370 371 372

	return 0;
}
373
EXPORT_SYMBOL_GPL(ksz_enable_port);
374

375
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
376 377 378 379
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

380
	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
381 382 383
	if (!ds)
		return NULL;

384 385 386
	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

387 388 389 390 391 392 393 394 395 396 397 398 399 400
	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

401 402
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
403
{
404
	struct device_node *port, *ports;
405
	phy_interface_t interface;
406
	unsigned int port_num;
407 408 409 410 411
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

412 413 414 415 416 417
	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
418
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
419
		usleep_range(10000, 12000);
420
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
421
		msleep(100);
422 423
	}

424
	mutex_init(&dev->dev_mutex);
425
	mutex_init(&dev->regmap_mutex);
426 427 428
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

429 430 431
	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
432 433
		return -EINVAL;

434
	ret = dev->dev_ops->init(dev);
435 436 437
	if (ret)
		return ret;

438 439 440
	/* Host port interface will be self detected, or specifically set in
	 * device tree.
	 */
441 442
	for (port_num = 0; port_num < dev->port_cnt; ++port_num)
		dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
443
	if (dev->dev->of_node) {
444 445
		ret = of_get_phy_mode(dev->dev->of_node, &interface);
		if (ret == 0)
446
			dev->compat_interface = interface;
447 448 449
		ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
		if (!ports)
			ports = of_get_child_by_name(dev->dev->of_node, "ports");
450 451 452 453 454
		if (ports)
			for_each_available_child_of_node(ports, port) {
				if (of_property_read_u32(port, "reg",
							 &port_num))
					continue;
455 456
				if (!(dev->port_mask & BIT(port_num))) {
					of_node_put(port);
457
					return -EINVAL;
458
				}
459 460 461
				of_get_phy_mode(port,
						&dev->ports[port_num].interface);
			}
462 463
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
464 465 466 467 468 469
		dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
							     "microchip,synclko-disable");
		if (dev->synclko_125 && dev->synclko_disable) {
			dev_err(dev->dev, "inconsistent synclko settings\n");
			return -EINVAL;
		}
470 471 472 473 474 475 476 477
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

478
	/* Read MIB counters every 30 seconds to avoid overflow. */
479
	dev->mib_read_interval = msecs_to_jiffies(5000);
480 481 482 483

	/* Start the MIB timer. */
	schedule_delayed_work(&dev->mib_read, 0);

484
	return 0;
485 486 487 488 489
}
EXPORT_SYMBOL(ksz_switch_register);

/* Tear down a previously registered switch: stop the MIB worker, run
 * the chip-specific exit hook, unregister from DSA and assert the reset
 * line (if one exists) to power the switch down.
 */
void ksz_switch_remove(struct ksz_device *dev)
{
	/* A non-zero interval means the MIB timer was started; clear it
	 * first so the worker cannot re-arm itself, then wait for any
	 * in-flight run to finish.
	 */
	if (dev->mib_read_interval) {
		dev->mib_read_interval = 0;
		cancel_delayed_work_sync(&dev->mib_read);
	}

	/* NOTE(review): exit() runs before dsa_unregister_switch() —
	 * presumably the exit hook does not touch DSA state; confirm
	 * against the chip-specific implementations before reordering.
	 */
	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	/* Hold the chip in reset on the way out. */
	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);

}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");