// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

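/* Apply the current bridge membership mask to every other forwarding member
 * port, skipping @port itself and the CPU port.
 */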
void ksz_update_port_member(struct ksz_device *dev, int port)
{
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (i == port || i == dev->cpu_port)
			continue;
		p = &dev->ports[i];
		if (!(dev->member & (1 << i)))
			continue;

		/* Port is a member of the bridge and is forwarding. */
		if (p->stp_state == BR_STATE_FORWARDING &&
		    p->member != dev->member)
			dev->dev_ops->cfg_port_member(dev, i, dev->member);
	}
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);

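/* Read the MIB counters of a port, starting at mib->cnt_ptr, into the
 * per-port counter storage; the dropped count is kept in the extra slot
 * at the end.
 */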
static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* last one in storage */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}

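/* Deferred work: walk all switch ports and refresh their MIB counters. */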
static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read);
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->mib_port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read MIB counters when the port is told to do so.
		 * Otherwise, read only the dropped counters when the link
		 * is down.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;
		mutex_unlock(&mib->cnt_mutex);
	}
}

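/* Periodic timer callback: re-arm the timer and schedule the MIB read work. */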
static void mib_monitor(struct timer_list *t)
{
	struct ksz_device *dev = from_timer(dev, t, mib_read_timer);

	mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
	schedule_work(&dev->mib_read);
}

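/* Initialize the per-port counters and start the periodic MIB refresh. */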
void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

	/* Read MIB counters every 30 seconds to avoid overflow. */
	dev->mib_read_interval = msecs_to_jiffies(30000);

	INIT_WORK(&dev->mib_read, ksz_mib_read_work);
	timer_setup(&dev->mib_read_timer, mib_monitor, 0);

	for (i = 0; i < dev->mib_port_cnt; i++)
		dev->dev_ops->port_init_cnt(dev, i);

	/* Start the timer 2 seconds later. */
	dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
	add_timer(&dev->mib_read_timer);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);

	return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->w_phy(dev, addr, reg, val);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

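/* Track link changes: read out the MIB counters when the link goes down and
 * update the mask of live ports.
 */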
void ksz_adjust_link(struct dsa_switch *ds, int port,
		     struct phy_device *phydev)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
	if (!phydev->link) {
		p->read = true;
		schedule_work(&dev->mib_read);
	}
	mutex_lock(&dev->dev_mutex);
	if (!phydev->link)
		dev->live_ports &= ~(1 << port);
	else
		/* Remember which port is connected and active. */
		dev->live_ports |= (1 << port) & dev->on_ports;
	mutex_unlock(&dev->dev_mutex);
}
EXPORT_SYMBOL_GPL(ksz_adjust_link);

int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ksz_device *dev = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

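/* Refresh the port's MIB counters and copy them into the ethtool
 * statistics buffer.
 */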
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

int ksz_port_bridge_join(struct dsa_switch *ds, int port,
			 struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member |= (1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the appropriate state, so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
			   struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member &= ~(1 << port);
	dev->member &= ~(1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * forwarding state, so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	/* nothing needed */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);

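/* Walk the dynamic MAC table and report the entries learned on @port
 * through @cb.
 */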
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	/* nothing to do */
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);

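/* Add @port to the static MAC table entry matching the MDB address and VID,
 * allocating a free entry if none matches yet.
 */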
void ksz_port_mdb_add(struct dsa_switch *ds, int port,
		      const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;
		}
	}

	/* no available entry */
	if (index == dev->num_statics && !empty)
		return;

	/* add entry */
	if (index == dev->num_statics) {
		index = empty - 1;
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

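/* Remove @port from the static MAC table entry matching the MDB address and
 * VID, and invalidate the entry once no port is left.
 */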
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int ret = 0;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* no matching entry */
	if (index == dev->num_statics)
		goto exit;

	/* clear port */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* setup slave port */
	dev->dev_ops->port_setup(dev, port, false);
	if (dev->dev_ops->phy_setup)
		dev->dev_ops->phy_setup(dev, port, phy);

	/* port_stp_state_set() will be called afterwards to enable the port,
	 * so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

void ksz_disable_port(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return;

	dev->on_ports &= ~(1 << port);
	dev->live_ports &= ~(1 << port);

	/* port_stp_state_set() will be called afterwards to disable the port,
	 * so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_disable_port);

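/* Allocate the DSA switch structure and the driver private ksz_device
 * tied to it.
 */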
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

	ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
	if (!ds)
		return NULL;

	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

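/* Reset the switch if a reset GPIO is provided, detect and initialize it,
 * then register it with the DSA core.
 */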
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
{
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
		mdelay(10);
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->regmap_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
		return -EINVAL;

	ret = dev->dev_ops->init(dev);
	if (ret)
		return ret;

	/* The host port interface will be self detected, or specifically set
	 * in the device tree.
	 */
	if (dev->dev->of_node) {
		ret = of_get_phy_mode(dev->dev->of_node);
		if (ret >= 0)
			dev->interface = ret;
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

void ksz_switch_remove(struct ksz_device *dev)
{
	/* Stop the MIB refresh only if the timer was actually started. */
	if (dev->mib_read_timer.expires) {
		del_timer_sync(&dev->mib_read_timer);
		flush_work(&dev->mib_read);
	}

	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");