// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * This file contains device methods for creating, using and destroying
 * virtual HSR or PRP devices.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"

static bool is_admin_up(struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP);
}

static bool is_slave_up(struct net_device *dev)
{
	return dev && is_admin_up(dev) && netif_oper_up(dev);
}

static void __hsr_set_operstate(struct net_device *dev, int transition)
{
	write_lock_bh(&dev_base_lock);
	if (dev->operstate != transition) {
		dev->operstate = transition;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	} else {
		write_unlock_bh(&dev_base_lock);
	}
}

43
/* Derive the master's operstate from its admin state and carrier. */
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
	int state;

	if (!is_admin_up(master->dev)) {
		__hsr_set_operstate(master->dev, IF_OPER_DOWN);
		return;
	}

	state = has_carrier ? IF_OPER_UP : IF_OPER_LOWERLAYERDOWN;
	__hsr_set_operstate(master->dev, state);
}

56
static bool hsr_check_carrier(struct hsr_port *master)
57
{
58
	struct hsr_port *port;
59

60
	ASSERT_RTNL();
61

62
	hsr_for_each_port(master->hsr, port) {
63
		if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
64 65
			netif_carrier_on(master->dev);
			return true;
66
		}
67
	}
68

69
	netif_carrier_off(master->dev);
70

71
	return false;
72 73
}

74 75
static void hsr_check_announce(struct net_device *hsr_dev,
			       unsigned char old_operstate)
76
{
77
	struct hsr_priv *hsr;
78

79
	hsr = netdev_priv(hsr_dev);
80

81
	if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
82
		/* Went up */
83
		hsr->announce_count = 0;
84 85
		mod_timer(&hsr->announce_timer,
			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
86 87
	}

88
	if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
89
		/* Went down */
90
		del_timer(&hsr->announce_timer);
91 92
}

93 94
/* Recompute the master's carrier and operstate from the slaves' state,
 * then start/stop supervision announcements on up/down transitions.
 * Must run under RTNL (hsr_check_carrier() asserts it).
 */
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
	struct hsr_port *master;
	unsigned char old_operstate;
	bool has_carrier;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	/* netif_stacked_transfer_operstate() cannot be used here since
	 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
	 */
	old_operstate = master->dev->operstate;
	has_carrier = hsr_check_carrier(master);
	hsr_set_operstate(master, has_carrier);
	/* Compare the new operstate against the snapshot taken above. */
	hsr_check_announce(master->dev, old_operstate);
}

109
int hsr_get_max_mtu(struct hsr_priv *hsr)
110
{
111
	unsigned int mtu_max;
112
	struct hsr_port *port;
113 114

	mtu_max = ETH_DATA_LEN;
115 116 117
	hsr_for_each_port(hsr, port)
		if (port->type != HSR_PT_MASTER)
			mtu_max = min(port->dev->mtu, mtu_max);
118 119 120

	if (mtu_max < HSR_HLEN)
		return 0;
121
	return mtu_max - HSR_HLEN;
122 123 124 125
}

static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
126
	struct hsr_priv *hsr;
127

128
	hsr = netdev_priv(dev);
129

130
	if (new_mtu > hsr_get_max_mtu(hsr)) {
131
		netdev_info(dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
132
			    HSR_HLEN);
133 134 135 136 137 138 139 140 141 142
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

static int hsr_dev_open(struct net_device *dev)
{
143
	struct hsr_priv *hsr;
144 145
	struct hsr_port *port;
	char designation;
146

147
	hsr = netdev_priv(dev);
148
	designation = '\0';
149

150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
			designation = 'A';
			break;
		case HSR_PT_SLAVE_B:
			designation = 'B';
			break;
		default:
			designation = '?';
		}
		if (!is_slave_up(port->dev))
			netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
				    designation, port->dev->name);
166
	}
167 168 169

	if (designation == '\0')
		netdev_warn(dev, "No slave devices configured\n");
170 171 172 173 174 175

	return 0;
}

/* ndo_stop: the master keeps no per-open state, so this is a no-op. */
static int hsr_dev_close(struct net_device *dev)
{
	return 0;
}

180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
/* Recompute the feature set the master can offer, given the features of
 * every port: start from the requested set and let each port's features
 * increment it, bounded by the original request.
 */
static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
						netdev_features_t features)
{
	netdev_features_t mask;
	struct hsr_port *port;

	mask = features;

	/* Mask out all features that, if supported by one device, should be
	 * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
	 *
	 * Anything that's off in mask will not be enabled - so only things
	 * that were in features originally, and also is in NETIF_F_ONE_FOR_ALL,
	 * may become enabled.
	 */
	features &= ~NETIF_F_ONE_FOR_ALL;
	hsr_for_each_port(hsr, port)
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);

	return features;
}

/* ndo_fix_features: constrain the master's features to what its ports
 * jointly support.
 */
static netdev_features_t hsr_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	return hsr_features_recompute(netdev_priv(dev), features);
}

212
static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
213
{
A
Arvid Brodin 已提交
214
	struct hsr_priv *hsr = netdev_priv(dev);
215
	struct hsr_port *master;
216

217
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
218 219
	if (master) {
		skb->dev = master->dev;
220
		skb_reset_mac_header(skb);
221 222 223 224 225
		hsr_forward_skb(skb, master);
	} else {
		atomic_long_inc(&dev->tx_dropped);
		dev_kfree_skb_any(skb);
	}
226 227 228 229
	return NETDEV_TX_OK;
}

/* The master uses plain Ethernet header construction/parsing; the
 * HSR/PRP tag is added later by the forwarding code.
 */
static const struct header_ops hsr_header_ops = {
	.create	 = eth_header,
	.parse	 = eth_header_parse,
};

234
/* Allocate and prime an skb for a supervision frame: reserve link-layer
 * headroom/tailroom, build an Ethernet header addressed to the
 * supervision multicast address, and reset the header offsets.
 * Returns NULL on allocation or header-build failure.
 */
static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
	struct hsr_priv *hsr = master->hsr;
	struct sk_buff *skb;
	int hlen, tlen;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;
	/* skb size is same for PRP/HSR frames, only difference
	 * being, for PRP it is a trailer and for HSR it is a
	 * header
	 */
	skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
			    sizeof(struct hsr_sup_payload) + hlen + tlen);

	if (!skb)
		return skb;

	skb_reserve(skb, hlen);
	skb->dev = master->dev;
	skb->priority = TC_PRIO_CONTROL;

	/* Source MAC is the master's address; EtherType is ETH_P_PRP for
	 * both protocol variants.
	 */
	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
			    hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	return skb;
out:
	kfree_skb(skb);

	return NULL;
}

/* Build and send one HSR supervision frame and set *interval to the
 * delay until the next one.  HSRv0 sends HSR_TLV_ANNOUNCE for the first
 * three frames after link-up, then HSR_TLV_LIFE_CHECK.
 */
static void send_hsr_supervision_frame(struct hsr_port *master,
				       unsigned long *interval)
{
	struct hsr_priv *hsr = master->hsr;
	__u8 type = HSR_TLV_LIFE_CHECK;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	unsigned long irqflags;
	struct sk_buff *skb;

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
		type = HSR_TLV_ANNOUNCE;
		*interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
		hsr->announce_count++;
	}

	skb = hsr_init_skb(master);
	if (!skb) {
		WARN_ONCE(1, "HSR: Could not send supervision frame\n");
		return;
	}

	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	if (hsr->prot_version > 0) {
		hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
		hsr->sup_sequence_nr++;
	} else {
		/* HSRv0 shares the data-path sequence number. */
		hsr_stag->sequence_nr = htons(hsr->sequence_nr);
		hsr->sequence_nr++;
	}
	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_stag->HSR_TLV_type = type;
	/* TODO: Why 12 in HSRv0? */
	hsr_stag->HSR_TLV_length = hsr->prot_version ?
				sizeof(struct hsr_sup_payload) : 12;

	/* Payload: MacAddressA */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	/* skb_put_padto() frees the skb on failure, so just return. */
	if (skb_put_padto(skb, ETH_ZLEN))
		return;

	hsr_forward_skb(skb, master);

	return;
}

327 328 329 330 331 332 333 334 335
/* Build and send one PRP supervision (life-check DD) frame and set
 * *interval to the delay until the next one.
 */
static void send_prp_supervision_frame(struct hsr_port *master,
				       unsigned long *interval)
{
	struct hsr_priv *hsr = master->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	unsigned long irqflags;
	struct sk_buff *skb;

	skb = hsr_init_skb(master);
	if (!skb) {
		WARN_ONCE(1, "PRP: Could not send supervision frame\n");
		return;
	}

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
	hsr->sup_sequence_nr++;
	hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
	hsr_stag->HSR_TLV_length = sizeof(struct hsr_sup_payload);

	/* Payload: MacAddressA */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	/* skb_put_padto() frees the skb on failure; drop the lock on both
	 * paths before returning/forwarding.
	 */
	if (skb_put_padto(skb, ETH_ZLEN)) {
		spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
		return;
	}

	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_forward_skb(skb, master);
}

368 369
/* Announce (supervision frame) timer function
 */
370
static void hsr_announce(struct timer_list *t)
371
{
372
	struct hsr_priv *hsr;
373
	struct hsr_port *master;
374
	unsigned long interval;
375

376
	hsr = from_timer(hsr, t, announce_timer);
A
Arvid Brodin 已提交
377 378

	rcu_read_lock();
379
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
380
	hsr->proto_ops->send_sv_frame(master, &interval);
381

382
	if (is_admin_up(master->dev))
383
		mod_timer(&hsr->announce_timer, jiffies + interval);
A
Arvid Brodin 已提交
384 385

	rcu_read_unlock();
386 387
}

388
void hsr_del_ports(struct hsr_priv *hsr)
389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404
{
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (port)
		hsr_del_port(port);
}

405 406 407 408 409
/* netdev callbacks for the HSR/PRP master device. */
static const struct net_device_ops hsr_device_ops = {
	.ndo_change_mtu = hsr_dev_change_mtu,
	.ndo_open = hsr_dev_open,
	.ndo_stop = hsr_dev_close,
	.ndo_start_xmit = hsr_dev_xmit,
	.ndo_fix_features = hsr_fix_features,
};

413 414 415
/* Device type attached via SET_NETDEV_DEVTYPE() in hsr_dev_setup(). */
static struct device_type hsr_type = {
	.name = "hsr",
};
416

417 418
/* Protocol hooks used when the device operates in HSR mode. */
static struct hsr_proto_ops hsr_ops = {
	.send_sv_frame = send_hsr_supervision_frame,
	.create_tagged_frame = hsr_create_tagged_frame,
	.get_untagged_frame = hsr_get_untagged_frame,
	.drop_frame = hsr_drop_frame,
	.fill_frame_info = hsr_fill_frame_info,
	.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};

426
/* Protocol hooks used when the device operates in PRP mode. */
static struct hsr_proto_ops prp_ops = {
	.send_sv_frame = send_prp_supervision_frame,
	.create_tagged_frame = prp_create_tagged_frame,
	.get_untagged_frame = prp_get_untagged_frame,
	.drop_frame = prp_drop_frame,
	.fill_frame_info = prp_fill_frame_info,
	.handle_san_frame = prp_handle_san_frame,
	.update_san_info = prp_update_san_info,
};

436 437
/* Initialise a freshly allocated master net_device: random MAC,
 * Ethernet defaults, HSR ops and feature flags.  Ports are attached
 * later by hsr_dev_finalize().
 */
void hsr_dev_setup(struct net_device *dev)
{
	eth_hw_addr_random(dev);

	ether_setup(dev);
	dev->min_mtu = 0;
	dev->header_ops = &hsr_header_ops;
	dev->netdev_ops = &hsr_device_ops;
	SET_NETDEV_DEVTYPE(dev, &hsr_type);
	dev->priv_flags |= IFF_NO_QUEUE;

	dev->needs_free_netdev = true;

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX;

	dev->features = dev->hw_features;

	/* Prevent recursive tx locking */
	dev->features |= NETIF_F_LLTX;
	/* VLAN on top of HSR needs testing and probably some work on
	 * hsr_header_create() etc.
	 */
	dev->features |= NETIF_F_VLAN_CHALLENGED;
	/* Not sure about this. Taken from bridge code. netdev_features.h says
	 * it means "Does not change network namespaces".
	 */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

/* Return true if dev is a HSR master; return false otherwise.
 */
469
bool is_hsr_master(struct net_device *dev)
470 471 472
{
	return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
473
EXPORT_SYMBOL(is_hsr_master);
474 475

/* Default multicast address for HSR Supervision frames.
 * The last octet is replaced with the user-supplied multicast_spec in
 * hsr_dev_finalize().
 */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};

/* Finish creation of a new HSR/PRP master: initialise private state,
 * register the master netdev and attach both slave ports.  On failure,
 * everything set up so far is torn down before returning the error.
 */
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
		     unsigned char multicast_spec, u8 protocol_version,
		     struct netlink_ext_ack *extack)
{
	bool unregister = false;
	struct hsr_priv *hsr;
	int res;

	hsr = netdev_priv(hsr_dev);
	INIT_LIST_HEAD(&hsr->ports);
	INIT_LIST_HEAD(&hsr->node_db);
	INIT_LIST_HEAD(&hsr->self_node_db);
	spin_lock_init(&hsr->list_lock);

	/* The master inherits slave A's MAC address. */
	ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);

	/* initialize protocol specific functions */
	if (protocol_version == PRP_V1) {
		/* For PRP, lan_id has most significant 3 bits holding
		 * the net_id of PRP_LAN_ID
		 */
		hsr->net_id = PRP_LAN_ID << 1;
		hsr->proto_ops = &prp_ops;
	} else {
		hsr->proto_ops = &hsr_ops;
	}

	/* Make sure we recognize frames from ourselves in hsr_rcv() */
	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
				   slave[1]->dev_addr);
	if (res < 0)
		return res;

	spin_lock_init(&hsr->seqnr_lock);
	/* Overflow soon to find bugs easier: */
	hsr->sequence_nr = HSR_SEQNR_START;
	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;

	timer_setup(&hsr->announce_timer, hsr_announce, 0);
	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);

	/* Supervision address is the default with the user-chosen last
	 * octet.
	 */
	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

	hsr->prot_version = protocol_version;

	/* Make sure the 1st call to netif_carrier_on() gets through */
	netif_carrier_off(hsr_dev);

	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
	if (res)
		goto err_add_master;

	res = register_netdevice(hsr_dev);
	if (res)
		goto err_unregister;

	/* From here on, error paths must also unregister the netdev. */
	unregister = true;

	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
	if (res)
		goto err_unregister;

	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
	if (res)
		goto err_unregister;

	hsr_debugfs_init(hsr, hsr_dev);
	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));

	return 0;

err_unregister:
	hsr_del_ports(hsr);
err_add_master:
	hsr_del_self_node(hsr);

	if (unregister)
		unregister_netdevice(hsr_dev);
	return res;
}