/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/crc32c.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
#include "network-coding.h"

44 45

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
46 47
 * list traversals just rcu-locked
 */
48
struct list_head batadv_hardif_list;
49
static int (*batadv_rx_handler[256])(struct sk_buff *,
50
				     struct batadv_hard_iface *);
51
char batadv_routing_algo[20] = "BATMAN_IV";
52
static struct hlist_head batadv_algo_list;
53

54
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
55

56
struct workqueue_struct *batadv_event_workqueue;
57

58
static void batadv_recv_handler_init(void);
59

60
static int __init batadv_init(void)
61
{
62
	INIT_LIST_HEAD(&batadv_hardif_list);
63
	INIT_HLIST_HEAD(&batadv_algo_list);
64

65
	batadv_recv_handler_init();
66

67
	batadv_iv_init();
68

69
	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
70

71
	if (!batadv_event_workqueue)
72 73
		return -ENOMEM;

74
	batadv_socket_init();
75
	batadv_debugfs_init();
76

77
	register_netdevice_notifier(&batadv_hard_if_notifier);
78
	rtnl_link_register(&batadv_link_ops);
79

80
	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
81
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
82 83 84 85

	return 0;
}

86
static void __exit batadv_exit(void)
87
{
88
	batadv_debugfs_destroy();
89
	rtnl_link_unregister(&batadv_link_ops);
90 91
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();
92

93 94 95
	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;
96 97 98 99

	rcu_barrier();
}

100
int batadv_mesh_init(struct net_device *soft_iface)
101
{
102
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
103
	int ret;
104 105 106

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
107 108 109 110 111 112 113
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);
114 115 116

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
117 118 119 120
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
121

122
	ret = batadv_originator_init(bat_priv);
123
	if (ret < 0)
124 125
		goto err;

126
	ret = batadv_tt_init(bat_priv);
127
	if (ret < 0)
128 129
		goto err;

130 131
	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);
132

133
	ret = batadv_vis_init(bat_priv);
134
	if (ret < 0)
135 136
		goto err;

137
	ret = batadv_bla_init(bat_priv);
138
	if (ret < 0)
139 140
		goto err;

141 142 143 144
	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

145 146 147 148
	ret = batadv_nc_init(bat_priv);
	if (ret < 0)
		goto err;

149
	atomic_set(&bat_priv->gw.reselect, 0);
150
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
151 152

	return 0;
153 154

err:
155
	batadv_mesh_free(soft_iface);
156
	return ret;
157 158
}

159
void batadv_mesh_free(struct net_device *soft_iface)
160
{
161
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
162

163
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
164

165
	batadv_purge_outstanding_packets(bat_priv, NULL);
166

167
	batadv_vis_quit(bat_priv);
168

169
	batadv_gw_node_purge(bat_priv);
170
	batadv_nc_free(bat_priv);
171 172
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);
173

174 175 176 177
	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
178
	batadv_tt_free(bat_priv);
179

180 181 182 183 184 185
	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);
186

187
	free_percpu(bat_priv->bat_counters);
188
	bat_priv->bat_counters = NULL;
189

190
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
191 192
}

193 194 195 196 197 198
/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 */
199
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
200
{
201
	const struct batadv_hard_iface *hard_iface;
202 203

	rcu_read_lock();
204
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
205
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
206 207
			continue;

208 209 210
		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

211
		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
212 213 214 215 216 217 218 219
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}

220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255
/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 *  function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309
/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}

310
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
311
					struct batadv_hard_iface *recv_if)
312 313 314 315 316 317 318
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
319 320 321
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
322
{
323
	struct batadv_priv *bat_priv;
324
	struct batadv_ogm_packet *batadv_ogm_packet;
325
	struct batadv_hard_iface *hard_iface;
326 327 328
	uint8_t idx;
	int ret;

329 330
	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

350
	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
351 352 353
		goto err_free;

	/* discard frames on not active interfaces */
354
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
355 356
		goto err_free;

357
	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
358

359
	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
360
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
361
			   "Drop packet: incompatible batman version (%i)\n",
362
			   batadv_ogm_packet->header.version);
363 364 365 366 367 368
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
369
	idx = batadv_ogm_packet->header.packet_type;
370
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

387
static void batadv_recv_handler_init(void)
388 389 390
{
	int i;

391 392
	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;
393 394

	/* batman icmp packet */
395
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
396 397
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
398
	/* unicast packet */
399
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
400
	/* fragmented unicast packet */
401
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
402
	/* broadcast packet */
403
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
404
	/* vis packet */
405
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
406
	/* Translation table query (request or response) */
407
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
408
	/* Roaming advertisement */
409
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
410 411
}

412 413 414 415
int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
416
{
417
	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
418 419
		return -EBUSY;

420
	batadv_rx_handler[packet_type] = recv_handler;
421 422 423
	return 0;
}

424
void batadv_recv_handler_unregister(uint8_t packet_type)
425
{
426
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
427 428
}

429
static struct batadv_algo_ops *batadv_algo_get(char *name)
430
{
431
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
432

433
	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
434 435 436 437 438 439 440 441 442 443
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

444
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
445
{
446
	struct batadv_algo_ops *bat_algo_ops_tmp;
447
	int ret;
448

449
	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
450
	if (bat_algo_ops_tmp) {
451 452
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
453
		ret = -EEXIST;
454 455 456
		goto out;
	}

457
	/* all algorithms must implement all ops (for now) */
458
	if (!bat_algo_ops->bat_iface_enable ||
459
	    !bat_algo_ops->bat_iface_disable ||
460
	    !bat_algo_ops->bat_iface_update_mac ||
461
	    !bat_algo_ops->bat_primary_iface_set ||
462
	    !bat_algo_ops->bat_ogm_schedule ||
463
	    !bat_algo_ops->bat_ogm_emit) {
464 465
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
466
		ret = -EINVAL;
467 468 469
		goto out;
	}

470
	INIT_HLIST_NODE(&bat_algo_ops->list);
471
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
472 473 474 475 476 477
	ret = 0;

out:
	return ret;
}

478
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
479
{
480
	struct batadv_algo_ops *bat_algo_ops;
481
	int ret = -EINVAL;
482

483
	bat_algo_ops = batadv_algo_get(name);
484 485 486 487 488 489 490 491 492 493
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

494
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
495
{
496
	struct batadv_algo_ops *bat_algo_ops;
497

498
	seq_puts(seq, "Available routing algorithms:\n");
499

500
	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
501 502 503 504 505 506
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 *  the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}

538
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
539
{
540
	struct batadv_algo_ops *bat_algo_ops;
541 542
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);
543

544
	if (name_len > 0 && algo_name[name_len - 1] == '\n')
545 546
		algo_name[name_len - 1] = '\0';

547
	bat_algo_ops = batadv_algo_get(algo_name);
548
	if (!bat_algo_ops) {
549
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
550 551 552
		return -EINVAL;
	}

553
	return param_set_copystring(algo_name, kp);
554 555
}

556 557
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
558 559 560
	.get = param_get_string,
};

561
static struct kparam_string batadv_param_string_ra = {
562 563
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
564 565
};

566 567 568 569
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);
570 571 572

MODULE_LICENSE("GPL");

573 574 575 576
MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);