/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "packet.h"
#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);

/**
 * batadv_choose_claim - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}

/**
 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
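	/* the data pointer actually refers to a backbone gateway here; the
	 * cast relies on the backbone gateway's orig/vid members lining up
	 * with a claim's addr/vid members, so the same hash layout can be
	 * reused (assumption carried over from the surrounding code, not
	 * verified here)
	 */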
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}

/**
 * batadv_compare_backbone_gw - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: true if the backbones have the same data, false otherwise
 */
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return false;

	if (gw1->vid != gw2->vid)
		return false;

	return true;
}

/**
 * batadv_compare_claim - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
 */
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return false;

	if (cl1->vid != cl2->vid)
		return false;

	return true;
}

/**
 * batadv_backbone_gw_release - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}

/**
 * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}

/**
 * batadv_claim_release - release claim from lists and queue for free after rcu
 *  grace period
 * @ref: kref pointer of the claim
 */
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *old_backbone_gw;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
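	/* the backbone CRC is kept as an XOR of crc16() over every claimed
	 * client address, so XOR-ing the same value again removes this
	 * claim's contribution
	 */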
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

	kfree_rcu(claim, rcu);
}

/**
 * batadv_claim_put - decrement the claim refcounter and possibly
 *  release it
 * @claim: claim to be free'd
 */
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	kref_put(&claim->refcount, batadv_claim_release);
}

/**
 * batadv_claim_hash_find - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * Return: claim if found or NULL otherwise.
 */
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!kref_get_unless_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!kref_get_unless_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}

/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC:  originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX   = claim type
			  * and YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
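	/* hw_src now points at the ARP "sender hardware address" field: for
	 * an Ethernet/IPv4 ARP reply the payload is laid out as sender HW
	 * (6 bytes), sender IP (4), target HW (6), target IP (4)
	 */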

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the client's mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the client's mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gw's mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));

		break;
438 439
	}

440
	if (vid & BATADV_VLAN_HAS_TAG) {
441 442
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
443 444 445
		if (!skb)
			goto out;
	}
446 447 448

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
449 450 451
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
452 453 454 455 456
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_loopdetect_report - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    BATADV_PRINT_VID(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 BATADV_PRINT_VID(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);

	/* one for the hash, one for returning */
	kref_init(&entry->refcount);
	kref_get(&entry->refcount);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

/**
 * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * batadv_bla_send_announce - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);
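	/* the announced "client" address is now 43:05:43:05:XX:XX, where
	 * XX:XX is the backbone gateway's claim CRC in network byte order
	 */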

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}

/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_backbone_gw *old_backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	bool remove_crc = false;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		spin_lock_init(&claim->backbone_lock);
		claim->vid = vid;
		claim->lasttime = jiffies;
		kref_get(&backbone_gw->refcount);
		claim->backbone_gw = backbone_gw;
		kref_init(&claim->refcount);

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));

		kref_get(&claim->refcount);
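		/* the kref_get() above is the reference kept by the hash
		 * table; the kref_init() reference is dropped again at
		 * claim_free_ref below
		 */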
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		remove_crc = true;
	}

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;
	spin_unlock_bh(&claim->backbone_lock);

	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}
	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&backbone_gw->crc_lock);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_put(claim);
}

/**
 * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}

/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_put(claim); /* reference from the hash is gone */

	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}

/**
 * batadv_handle_announce - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return false;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__be16 *)(&an_addr[4])));
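	/* an_addr carries the sender's claim CRC in its last two bytes,
	 * as filled in by batadv_bla_send_announce()
	 */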

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_handle_request - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return false;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return true;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return true;
}

/**
 * batadv_handle_unclaim - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return true;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_handle_claim - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_check_claim_group - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it's on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *	2  - if it is a claim packet and on the same group
 *	1  - if it is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if it's already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* let's see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friend's mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}

/**
 * batadv_bla_process_claim - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callee that it can use the frame on its own.
 */
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return false;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return false; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return false;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return false;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return false;
	if (arphdr->ar_hln != ETH_ALEN)
		return false;
	if (arphdr->ar_pln != 4)
		return false;

	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
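	/* hw_dst is the ARP "target hardware address"; claim frames encode
	 * struct batadv_bla_claim_dst (magic, claim type, group id) there
	 */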
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return false;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return true;

	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
		return false;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return !!ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return true;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return true;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return true;
}

/**
 * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
			if (now)
				goto purge_now;

			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				goto skip;

			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				goto skip;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      backbone_gw->orig,
					      claim->addr, claim->vid);
skip:
			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_update_orig_address - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_send_loopdetect - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the mesh, this
 * indicates that there is a loop.
 */
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}

/**
 * batadv_bla_status_update - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_periodic_work - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	bool send_loopdetect = false;
	int i;

	delayed_work = to_delayed_work(work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
		random_ether_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}

/* The hash for claim and backbone hash receive the same key because they
 * are getting initialized by hash_new with the same key. Reinitializing
 * them with two different keys allows nested locking without generating
 * lockdep warnings
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;

/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	u16 crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
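	/* claim frames carry ff:43:05:<type>:<group id> as their ARP target
	 * HW address; the group id is filled in from the primary MAC below
	 */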
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_put(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}

/**
 * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	int i, curr;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;
	bool ret = false;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
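	/* the CRC is computed over the payload following the broadcast
	 * header, so the same payload rebroadcast by another backbone
	 * gateway produces the same value
	 */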
	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return true to forbid it.
		 */
		ret = true;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, it's the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}

/**
 * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
 *  the VLAN identified by vid.
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Return: true if orig is a backbone for this vid, false otherwise.
 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}

/**
 * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
 */
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return false;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return false;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return false;

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
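
/* Illustrative sketch (not part of the original code): a receive handler can
 * use the check above to drop payload frames whose originator is also
 * reachable as a backbone gateway on the local LAN, e.g. (label name is an
 * assumption):
 *
 *	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
 *		goto out;
 */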

/**
 * batadv_bla_free - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Called on soft-interface free or module unload.
 */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_loopdetect_check - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came in
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us;
 * if so, an uevent is thrown and the event is logged.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;

	ethhdr = eth_hdr(skb);

	/* Only check for the MAC address and skip more checks here for
	 * performance reasons - this function is on the hotpath, after all.
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* If the packet came too late, don't forward it on the mesh
	 * but don't consider that as loop. It might be a coincidence.
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

	queue_work(batadv_event_workqueue, &backbone_gw->report_work);
	/* backbone_gw is unreferenced in the report work function */

	return true;
}

/**
 * batadv_bla_rx - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * batadv_bla_rx checks if:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, bool is_bcast)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* No claim exists yet, claim it for us!
		 * (possible optimization: race for a claim)
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	kfree_skb(skb);
	ret = true;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
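
/* Illustrative sketch (not part of the original code): the soft-interface
 * receive path is expected to hand every decapsulated frame to
 * batadv_bla_rx() and stop once the frame is reported as handled, roughly
 * (caller context assumed):
 *
 *	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 *		goto out;
 *
 * When true is returned the skb has already been consumed (see the
 * kfree_skb() on the handled path above), so the caller must not touch it
 * again.
 */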

/**
 * batadv_bla_tx - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function.
 *
 * This call might reallocate skb data.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	bool client_roamed;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;
handled:
	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
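
/* Illustrative sketch (not part of the original code): the soft-interface
 * transmit path is expected to run every outgoing frame through
 * batadv_bla_tx() before encapsulating it, roughly (label name assumed):
 *
 *	if (batadv_bla_tx(bat_priv, skb, vid))
 *		goto dropped;
 *
 * Unlike batadv_bla_rx(), the handled path above does not free the skb, so
 * the caller has to drop it itself.
 */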

/**
 * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq,
		 "   Client               VID      Originator        [o] (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);
			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, BATADV_PRINT_VID(claim->vid),
				   backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   backbone_crc);

			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
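
/* Example output of the function above (addresses, VID and CRC values are
 * made up for illustration; the format follows the seq_printf() calls):
 *
 *	Claims announced for the mesh bat0 (orig 02:ba:de:af:fe:01, group id 0x1f2e)
 *	   Client               VID      Originator        [o] (CRC   )
 *	 * 66:55:44:33:22:11 on    -1 by 02:ba:de:af:fe:01 [x] (0x3a4f)
 */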

/**
 * batadv_bla_claim_dump_entry - dump one entry of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: always 0.
 */
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
			     struct batadv_hard_iface *primary_if,
			     struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_claim *claim;
	int idx = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;
		if (batadv_bla_claim_dump_entry(msg, portid, seq,
						primary_if, claim)) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = idx;
unlock:
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_bla_claim_dump - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	struct hlist_head *head;
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		head = &hash->table[bucket];

		if (batadv_bla_claim_dump_bucket(msg, portid,
						 cb->nlh->nlmsg_seq,
						 primary_if, head, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

/**
 * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
 *  file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   BATADV_PRINT_VID(backbone_gw->vid), secs,
				   msecs, backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
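
/* Example output of the function above (addresses, VID, timing and CRC
 * values are made up for illustration; the format follows the seq_printf()
 * calls):
 *
 *	Backbones announced for the mesh bat0 (orig 02:ba:de:af:fe:01, group id 0x1f2e)
 *	   Originator           VID   last seen (CRC   )
 *	 * 02:ba:de:af:fe:02 on    -1    2.345s (0x7b19)
 */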

/**
 * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	int msecs;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: always 0.
 */
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
				struct batadv_hard_iface *primary_if,
				struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;
		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
						   primary_if, backbone_gw)) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = idx;
unlock:
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_bla_backbone_dump - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	struct hlist_head *head;
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.backbone_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		head = &hash->table[bucket];

		if (batadv_bla_backbone_dump_bucket(msg, portid,
						    cb->nlh->nlmsg_seq,
						    primary_if, head, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}