bridge_loop_avoidance.c 63.5 KB
Newer Older
1
/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
2 3 4 5 6 7 8 9 10 11 12 13 14
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
15
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 17 18
 */

#include "bridge_loop_avoidance.h"
19
#include "main.h"
20

21 22 23
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
24
#include <linux/crc16.h>
25 26 27
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
28
#include <linux/if_arp.h>
29
#include <linux/if_ether.h>
30
#include <linux/if_vlan.h>
31 32 33
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
34
#include <linux/kref.h>
35 36 37
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
38
#include <linux/netlink.h>
39 40 41 42 43 44 45 46 47 48
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
49 50 51 52
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batman_adv.h>
53 54 55

#include "hard-interface.h"
#include "hash.h"
56
#include "log.h"
57
#include "netlink.h"
58 59
#include "originator.h"
#include "packet.h"
60
#include "soft-interface.h"
61
#include "sysfs.h"
62
#include "translation-table.h"
63

64
static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
65

66
static void batadv_bla_periodic_work(struct work_struct *work);
67 68 69
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);
70

71
/**
72 73 74
 * batadv_choose_claim - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
75
 *
76
 * Return: the hash index of the claim
77
 */
78
static inline u32 batadv_choose_claim(const void *data, u32 size)
79
{
80
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
81
	u32 hash = 0;
82

83 84
	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
85 86 87 88

	return hash % size;
}

89
/**
90 91 92
 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
93
 *
94
 * Return: the hash index of the backbone gateway
95
 */
96
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
97
{
98
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
99
	u32 hash = 0;
100

101 102
	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
103 104 105 106

	return hash % size;
}

107 108 109 110 111
/**
 * batadv_compare_backbone_gw - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
112
 * Return: true if the backbones have the same data, false otherwise
113
 */
114 115
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
116
{
117
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
118
					 hash_entry);
119 120
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;
121

122
	if (!batadv_compare_eth(gw1->orig, gw2->orig))
123
		return false;
124 125

	if (gw1->vid != gw2->vid)
126
		return false;
127

128
	return true;
129 130
}

131
/**
132
 * batadv_compare_claim - compare address and vid of two claims
133 134 135
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claims
 *
136
 * Return: true if the claim have the same data, 0 otherwise
137
 */
138 139
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
140
{
141
	const void *data1 = container_of(node, struct batadv_bla_claim,
142
					 hash_entry);
143 144
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;
145 146

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
147
		return false;
148 149

	if (cl1->vid != cl2->vid)
150
		return false;
151

152
	return true;
153 154
}

155
/**
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
 * batadv_backbone_gw_release - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}

/**
171 172
 * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
 *  release it
173 174
 * @backbone_gw: backbone gateway to be free'd
 */
175
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
176
{
177
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
178 179
}

180 181 182 183 184
/**
 * batadv_claim_release - release claim from lists and queue for free after rcu
 *  grace period
 * @ref: kref pointer of the claim
 */
185
static void batadv_claim_release(struct kref *ref)
186
{
187
	struct batadv_bla_claim *claim;
188
	struct batadv_bla_backbone_gw *old_backbone_gw;
189 190 191

	claim = container_of(ref, struct batadv_bla_claim, refcount);

192 193 194 195 196 197 198 199 200 201 202
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

203
	kfree_rcu(claim, rcu);
204 205
}

206
/**
207
 * batadv_claim_put - decrement the claim refcounter and possibly
208
 *  release it
209 210
 * @claim: claim to be free'd
 */
211
static void batadv_claim_put(struct batadv_bla_claim *claim)
212
{
213
	kref_put(&claim->refcount, batadv_claim_release);
214 215
}

216
/**
217
 * batadv_claim_hash_find - looks for a claim in the claim hash
218
 * @bat_priv: the bat priv with all the soft interface information
219 220
 * @data: search data (may be local/static data)
 *
221
 * Return: claim if found or NULL otherwise.
222
 */
223 224 225
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
226
{
227
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
228
	struct hlist_head *head;
229 230
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
231 232 233 234 235
	int index;

	if (!hash)
		return NULL;

236
	index = batadv_choose_claim(data, hash->size);
237 238 239
	head = &hash->table[index];

	rcu_read_lock();
240
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
241
		if (!batadv_compare_claim(&claim->hash_entry, data))
242 243
			continue;

244
		if (!kref_get_unless_zero(&claim->refcount))
245 246 247 248 249 250 251 252 253 254
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

255
/**
256
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
257
 * @bat_priv: the bat priv with all the soft interface information
258 259 260
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
261
 * Return: backbone gateway if found or NULL otherwise
262
 */
263
static struct batadv_bla_backbone_gw *
264 265
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
266
{
267
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
268
	struct hlist_head *head;
269 270
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
271 272 273 274 275
	int index;

	if (!hash)
		return NULL;

276
	ether_addr_copy(search_entry.orig, addr);
277 278
	search_entry.vid = vid;

279
	index = batadv_choose_backbone_gw(&search_entry, hash->size);
280 281 282
	head = &hash->table[index];

	rcu_read_lock();
283
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
284 285
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
286 287
			continue;

288
		if (!kref_get_unless_zero(&backbone_gw->refcount))
289 290 291 292 293 294 295 296 297 298
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

299 300 301 302
/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */
303
static void
304
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
305
{
306
	struct batadv_hashtable *hash;
307
	struct hlist_node *node_tmp;
308
	struct hlist_head *head;
309
	struct batadv_bla_claim *claim;
310 311 312
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

313
	hash = backbone_gw->bat_priv->bla.claim_hash;
314 315 316 317 318 319 320 321
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
322
		hlist_for_each_entry_safe(claim, node_tmp,
323 324 325 326
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

327
			batadv_claim_put(claim);
328
			hlist_del_rcu(&claim->hash_entry);
329 330 331 332
		}
		spin_unlock_bh(list_lock);
	}

333
	/* all claims gone, initialize CRC */
334
	spin_lock_bh(&backbone_gw->crc_lock);
335
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
336
	spin_unlock_bh(&backbone_gw->crc_lock);
337 338
}

339 340 341
/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
342
 * @mac: the mac address to be announced within the claim
343 344 345
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
346
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
347
				  unsigned short vid, int claimtype)
348 349 350
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
351
	struct batadv_hard_iface *primary_if;
352
	struct net_device *soft_iface;
353
	u8 *hw_src;
354
	struct batadv_bla_claim_dst local_claim_dest;
355
	__be32 zeroip = 0;
356

357
	primary_if = batadv_primary_if_get_selected(bat_priv);
358 359 360
	if (!primary_if)
		return;

361
	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
362
	       sizeof(local_claim_dest));
363 364 365 366 367 368 369 370 371 372 373 374 375 376
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC:  originator mac */
			 primary_if->net_dev->dev_addr,
377
			 /* HW DST: FF:43:05:XX:YY:YY
378
			  * with XX   = claim type
379
			  * and YY:YY = group id
380
			  */
381
			 (u8 *)&local_claim_dest);
382 383 384 385 386

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
387
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
388 389 390

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
391
	case BATADV_CLAIM_TYPE_CLAIM:
392 393 394
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
395
		ether_addr_copy(ethhdr->h_source, mac);
396
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
397 398
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
399
		break;
400
	case BATADV_CLAIM_TYPE_UNCLAIM:
401 402 403
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
404
		ether_addr_copy(hw_src, mac);
405
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
406
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
407
			   BATADV_PRINT_VID(vid));
408
		break;
409
	case BATADV_CLAIM_TYPE_ANNOUNCE:
410 411 412
		/* announcement frame
		 * set HW SRC to the special mac containg the crc
		 */
413
		ether_addr_copy(hw_src, mac);
414
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
415
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
416
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
417
		break;
418
	case BATADV_CLAIM_TYPE_REQUEST:
419
		/* request frame
420 421
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
422
		 */
423 424
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
425
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
426
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
427 428
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
429
		break;
430 431 432 433 434 435 436 437
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));

		break;
438 439
	}

440
	if (vid & BATADV_VLAN_HAS_TAG) {
441 442
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
443 444 445
		if (!skb)
			goto out;
	}
446 447 448

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
449 450 451
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
452 453 454 455 456
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
457
		batadv_hardif_put(primary_if);
458 459
}

460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489
/**
 * batadv_bla_loopdetect_report - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    BATADV_PRINT_VID(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 BATADV_PRINT_VID(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}

490
/**
491
 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
492
 * @bat_priv: the bat priv with all the soft interface information
493 494
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
495
 * @own_backbone: set if the requested backbone is local
496
 *
497
 * Return: the (possibly created) backbone gateway or NULL on error
498
 */
499
static struct batadv_bla_backbone_gw *
500
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
501
			   unsigned short vid, bool own_backbone)
502
{
503
	struct batadv_bla_backbone_gw *entry;
504
	struct batadv_orig_node *orig_node;
505 506
	int hash_added;

507
	entry = batadv_backbone_hash_find(bat_priv, orig, vid);
508 509 510 511

	if (entry)
		return entry;

512
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
513
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
514
		   orig, BATADV_PRINT_VID(vid));
515 516 517 518 519 520 521

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
522
	entry->crc = BATADV_BLA_CRC_INIT;
523
	entry->bat_priv = bat_priv;
524
	spin_lock_init(&entry->crc_lock);
525
	atomic_set(&entry->request_sent, 0);
526
	atomic_set(&entry->wait_periods, 0);
527
	ether_addr_copy(entry->orig, orig);
528
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
529 530

	/* one for the hash, one for returning */
531 532
	kref_init(&entry->refcount);
	kref_get(&entry->refcount);
533

534
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
535 536 537
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);
538 539 540 541 542 543 544

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

545
	/* this is a gateway now, remove any TT entry on this VLAN */
546
	orig_node = batadv_orig_hash_find(bat_priv, orig);
547
	if (orig_node) {
548
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
549
					  "became a backbone gateway");
550
		batadv_orig_node_put(orig_node);
551
	}
552

553
	if (own_backbone) {
554 555
		batadv_bla_send_announce(bat_priv, entry);

556 557
		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
558
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
559 560 561
		atomic_inc(&bat_priv->bla.num_requests);
	}

562 563 564
	return entry;
}

565 566 567 568 569 570 571
/**
 * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
572 573
 * where we receive other backbone gws
 */
574 575 576
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
577
				  unsigned short vid)
578
{
579
	struct batadv_bla_backbone_gw *backbone_gw;
580

581 582
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
583
						 vid, true);
584 585 586 587
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
588
	batadv_backbone_gw_put(backbone_gw);
589 590
}

591 592 593
/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
594
 * @primary_if: interface where the request came on
595 596 597 598 599
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
600 601
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
602
				      unsigned short vid)
603 604
{
	struct hlist_head *head;
605
	struct batadv_hashtable *hash;
606
	struct batadv_bla_claim *claim;
607
	struct batadv_bla_backbone_gw *backbone_gw;
608 609
	int i;

610
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
611
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");
612

613 614 615
	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
616 617 618
	if (!backbone_gw)
		return;

619
	hash = bat_priv->bla.claim_hash;
620 621 622 623
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
624
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
625 626 627 628
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

629
			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
630
					      BATADV_CLAIM_TYPE_CLAIM);
631 632 633 634 635
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
636
	batadv_bla_send_announce(bat_priv, backbone_gw);
637
	batadv_backbone_gw_put(backbone_gw);
638 639
}

640 641 642
/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
643 644 645 646 647
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */
648
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
649 650
{
	/* first, remove all old entries */
651
	batadv_bla_del_backbone_claims(backbone_gw);
652

653 654
	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);
655 656

	/* send request */
657
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
658
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
659 660 661

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
662
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
663 664 665 666
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

667
/**
668
 * batadv_bla_send_announce - Send an announcement frame
669
 * @bat_priv: the bat priv with all the soft interface information
670 671
 * @backbone_gw: our backbone gateway which should be announced
 */
672
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
673
				     struct batadv_bla_backbone_gw *backbone_gw)
674
{
675
	u8 mac[ETH_ALEN];
676
	__be16 crc;
677

678
	memcpy(mac, batadv_announce_mac, 4);
679
	spin_lock_bh(&backbone_gw->crc_lock);
680
	crc = htons(backbone_gw->crc);
681
	spin_unlock_bh(&backbone_gw->crc_lock);
682
	memcpy(&mac[4], &crc, 2);
683

684
	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
685
			      BATADV_CLAIM_TYPE_ANNOUNCE);
686 687
}

688 689 690
/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
691 692 693 694
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
695
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
696
				 const u8 *mac, const unsigned short vid,
697
				 struct batadv_bla_backbone_gw *backbone_gw)
698
{
699
	struct batadv_bla_backbone_gw *old_backbone_gw;
700 701
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
702
	bool remove_crc = false;
703 704
	int hash_added;

705
	ether_addr_copy(search_claim.addr, mac);
706
	search_claim.vid = vid;
707
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
708 709 710 711 712 713 714

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

715
		ether_addr_copy(claim->addr, mac);
716
		spin_lock_init(&claim->backbone_lock);
717 718
		claim->vid = vid;
		claim->lasttime = jiffies;
719
		kref_get(&backbone_gw->refcount);
720 721
		claim->backbone_gw = backbone_gw;

722 723
		kref_init(&claim->refcount);
		kref_get(&claim->refcount);
724
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
725
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
726
			   mac, BATADV_PRINT_VID(vid));
727
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
728 729 730
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);
731 732 733 734 735 736 737 738 739 740 741 742

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

743
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
744
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
745
			   mac, BATADV_PRINT_VID(vid));
746

747
		remove_crc = true;
748
	}
749 750 751 752

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
753
	kref_get(&backbone_gw->refcount);
754
	claim->backbone_gw = backbone_gw;
755
	spin_unlock_bh(&claim->backbone_lock);
756

757 758 759 760 761 762
	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}
763

764 765 766
	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
767
	spin_lock_bh(&backbone_gw->crc_lock);
768
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
769
	spin_unlock_bh(&backbone_gw->crc_lock);
770 771 772
	backbone_gw->lasttime = jiffies;

claim_free_ref:
773
	batadv_claim_put(claim);
774 775
}

776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795
/**
 * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}

796 797 798 799 800
/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
801
 */
802
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
803
				 const u8 *mac, const unsigned short vid)
804
{
805
	struct batadv_bla_claim search_claim, *claim;
806

807
	ether_addr_copy(search_claim.addr, mac);
808
	search_claim.vid = vid;
809
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
810 811 812
	if (!claim)
		return;

813
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
814
		   mac, BATADV_PRINT_VID(vid));
815

816
	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
817
			   batadv_choose_claim, claim);
818
	batadv_claim_put(claim); /* reference from the hash is gone */
819 820

	/* don't need the reference from hash_find() anymore */
821
	batadv_claim_put(claim);
822 823
}

824 825
/**
 * batadv_handle_announce - check for ANNOUNCE frame
826 827 828 829
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
830
 *
831
 * Return: true if handled
832
 */
833 834
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
835
{
836
	struct batadv_bla_backbone_gw *backbone_gw;
837
	u16 backbone_crc, crc;
838

839
	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
840
		return false;
841

842 843
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);
844 845

	if (unlikely(!backbone_gw))
846
		return true;
847 848 849

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
850
	crc = ntohs(*((__be16 *)(&an_addr[4])));
851

852
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
853
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
854
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
855

856 857 858 859 860
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
861
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
862
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
863 864
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
865
			   backbone_crc, crc);
866

867
		batadv_bla_send_request(backbone_gw);
868 869 870 871 872
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
873
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
874 875 876 877
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

878
	batadv_backbone_gw_put(backbone_gw);
879
	return true;
880 881
}

882 883
/**
 * batadv_handle_request - check for REQUEST frame
884 885 886 887 888
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
889
 *
890
 * Return: true if handled
891
 */
892 893 894 895
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
896 897
{
	/* check for REQUEST frame */
898
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
899
		return false;
900 901 902 903

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
904
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
905
		return true;
906

907
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
908
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
909
		   BATADV_PRINT_VID(vid), ethhdr->h_source);
910

911
	batadv_bla_answer_request(bat_priv, primary_if, vid);
912
	return true;
913 914
}

915 916
/**
 * batadv_handle_unclaim - check for UNCLAIM frame
917 918 919 920 921
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
922
 *
923
 * Return: true if handled
924
 */
925 926 927 928
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
929
{
930
	struct batadv_bla_backbone_gw *backbone_gw;
931 932

	/* unclaim in any case if it is our own */
933 934
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
935
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
936
				      BATADV_CLAIM_TYPE_UNCLAIM);
937

938
	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
939 940

	if (!backbone_gw)
941
		return true;
942 943

	/* this must be an UNCLAIM frame */
944
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
945
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
946
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
947

948
	batadv_bla_del_claim(bat_priv, claim_addr, vid);
949
	batadv_backbone_gw_put(backbone_gw);
950
	return true;
951 952
}

953 954
/**
 * batadv_handle_claim - check for CLAIM frame
955 956 957 958 959
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
960
 *
961
 * Return: true if handled
962
 */
963 964 965 966
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
967
{
968
	struct batadv_bla_backbone_gw *backbone_gw;
969 970 971

	/* register the gateway if not yet available, and add the claim. */

972 973
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);
974 975

	if (unlikely(!backbone_gw))
976
		return true;
977 978

	/* this must be a CLAIM frame */
979
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
980
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
981
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
982
				      BATADV_CLAIM_TYPE_CLAIM);
983 984 985

	/* TODO: we could call something like tt_local_del() here. */

986
	batadv_backbone_gw_put(backbone_gw);
987
	return true;
988 989
}

990
/**
991
 * batadv_check_claim_group - check for claim group membership
992
 * @bat_priv: the bat priv with all the soft interface information
993
 * @primary_if: the primary interface of this batman interface
994 995 996 997 998 999 1000 1001
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if its on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
1002
 * Return:
1003 1004 1005 1006
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */
1007 1008
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
1009
				    u8 *hw_src, u8 *hw_dst,
1010
				    struct ethhdr *ethhdr)
1011
{
1012
	u8 *backbone_addr;
1013
	struct batadv_orig_node *orig_node;
1014
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1015

1016
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1017
	bla_dst_own = &bat_priv->bla.claim_dest;
1018 1019 1020 1021 1022

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
1023
	case BATADV_CLAIM_TYPE_CLAIM:
1024 1025
		backbone_addr = hw_src;
		break;
1026 1027
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
1028
	case BATADV_CLAIM_TYPE_UNCLAIM:
1029 1030 1031 1032 1033 1034 1035
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
1036
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1037 1038 1039 1040 1041 1042 1043
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
1044
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1045 1046 1047 1048 1049 1050 1051 1052 1053

	/* dont accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1054
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1055
			   "taking other backbones claim group: %#.4x\n",
1056
			   ntohs(bla_dst->group));
1057 1058 1059
		bla_dst_own->group = bla_dst->group;
	}

1060
	batadv_orig_node_put(orig_node);
1061 1062 1063 1064

	return 2;
}

1065
/**
1066
 * batadv_bla_process_claim - Check if this is a claim frame, and process it
1067
 * @bat_priv: the bat priv with all the soft interface information
1068
 * @primary_if: the primary hard interface of this batman soft interface
1069 1070
 * @skb: the frame to be checked
 *
1071
 * Return: true if it was a claim frame, otherwise return false to
1072 1073
 * tell the callee that it can use the frame on its own.
 */
1074 1075 1076
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
1077
{
1078
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1079
	u8 *hw_src, *hw_dst;
1080
	struct vlan_hdr *vhdr, vhdr_buf;
1081
	struct ethhdr *ethhdr;
1082
	struct arphdr *arphdr;
1083
	unsigned short vid;
1084
	int vlan_depth = 0;
1085
	__be16 proto;
1086
	int headlen;
1087
	int ret;
1088

1089
	vid = batadv_get_vid(skb, 0);
1090
	ethhdr = eth_hdr(skb);
1091

1092 1093 1094
	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
1107
				return false;
1108 1109 1110 1111 1112

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
1113 1114
	}

1115
	if (proto != htons(ETH_P_ARP))
1116
		return false; /* not a claim frame */
1117 1118 1119 1120

	/* this must be a ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1121
		return false;
1122 1123

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
1124
	ethhdr = eth_hdr(skb);
1125
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1126 1127 1128 1129 1130

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1131
		return false;
1132
	if (arphdr->ar_pro != htons(ETH_P_IP))
1133
		return false;
1134
	if (arphdr->ar_hln != ETH_ALEN)
1135
		return false;
1136
	if (arphdr->ar_pln != 4)
1137
		return false;
1138

1139
	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1140
	hw_dst = hw_src + ETH_ALEN + 4;
1141
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1142 1143 1144 1145 1146
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
1147
		return false;
1148 1149 1150 1151 1152 1153

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
1154
		return true;
1155

1156 1157
	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1158
		return false;
1159

1160
	/* check if it is a claim frame. */
1161 1162
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
1163
	if (ret == 1)
1164
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1165
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1166 1167
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);
1168 1169

	if (ret < 2)
1170
		return !!ret;
1171 1172

	/* become a backbone gw ourselves on this vlan if not happened yet */
1173
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1174 1175 1176

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
1177
	case BATADV_CLAIM_TYPE_CLAIM:
1178 1179
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
1180
			return true;
1181
		break;
1182
	case BATADV_CLAIM_TYPE_UNCLAIM:
1183 1184
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
1185
			return true;
1186 1187
		break;

1188
	case BATADV_CLAIM_TYPE_ANNOUNCE:
1189 1190
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
1191
			return true;
1192
		break;
1193
	case BATADV_CLAIM_TYPE_REQUEST:
1194 1195
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
1196
			return true;
1197 1198 1199
		break;
	}

1200
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
1201
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1202
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
1203
	return true;
1204 1205
}

1206 1207 1208 1209 1210 1211 1212
/**
 * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
1213 1214
 * a time out, or clean all backbone gws if now is set.
 */
1215
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1216
{
1217
	struct batadv_bla_backbone_gw *backbone_gw;
1218
	struct hlist_node *node_tmp;
1219
	struct hlist_head *head;
1220
	struct batadv_hashtable *hash;
1221 1222 1223
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

1224
	hash = bat_priv->bla.backbone_hash;
1225 1226 1227 1228 1229 1230 1231 1232
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
1233
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
1234 1235 1236
					  head, hash_entry) {
			if (now)
				goto purge_now;
1237
			if (!batadv_has_timed_out(backbone_gw->lasttime,
1238
						  BATADV_BLA_BACKBONE_TIMEOUT))
1239 1240
				continue;

1241
			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1242 1243
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);
1244 1245 1246 1247

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
1248
				atomic_dec(&bat_priv->bla.num_requests);
1249

1250
			batadv_bla_del_backbone_claims(backbone_gw);
1251

1252
			hlist_del_rcu(&backbone_gw->hash_entry);
1253
			batadv_backbone_gw_put(backbone_gw);
1254 1255 1256 1257 1258
		}
		spin_unlock_bh(list_lock);
	}
}

1259
/**
1260
 * batadv_bla_purge_claims - Remove claims after a timeout or immediately
1261
 * @bat_priv: the bat priv with all the soft interface information
1262 1263 1264 1265 1266 1267
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set
 */
1268 1269 1270
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
1271
{
1272
	struct batadv_bla_backbone_gw *backbone_gw;
1273
	struct batadv_bla_claim *claim;
1274
	struct hlist_head *head;
1275
	struct batadv_hashtable *hash;
1276 1277
	int i;

1278
	hash = bat_priv->bla.claim_hash;
1279 1280 1281 1282 1283 1284 1285
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1286
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
1287
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1288 1289
			if (now)
				goto purge_now;
1290 1291

			if (!batadv_compare_eth(backbone_gw->orig,
1292
						primary_if->net_dev->dev_addr))
1293 1294
				goto skip;

1295
			if (!batadv_has_timed_out(claim->lasttime,
1296
						  BATADV_BLA_CLAIM_TIMEOUT))
1297
				goto skip;
1298

1299
			batadv_dbg(BATADV_DBG_BLA, bat_priv,
1300 1301
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);
1302 1303

purge_now:
1304
			batadv_handle_unclaim(bat_priv, primary_if,
1305
					      backbone_gw->orig,
1306
					      claim->addr, claim->vid);
1307 1308
skip:
			batadv_backbone_gw_put(backbone_gw);
1309 1310 1311 1312 1313
		}
		rcu_read_unlock();
	}
}

1314
/**
1315 1316
 * batadv_bla_update_orig_address - Update the backbone gateways when the own
 *  originator address changes
1317
 * @bat_priv: the bat priv with all the soft interface information
1318 1319 1320
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */
1321 1322 1323
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
1324
{
1325
	struct batadv_bla_backbone_gw *backbone_gw;
1326
	struct hlist_head *head;
1327
	struct batadv_hashtable *hash;
1328
	__be16 group;
1329 1330
	int i;

1331
	/* reset bridge loop avoidance group id */
1332 1333
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;
1334

1335 1336 1337 1338
	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

1339
	if (!oldif) {
1340 1341
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
1342 1343 1344
		return;
	}

1345
	hash = bat_priv->bla.backbone_hash;
1346 1347 1348 1349 1350 1351 1352
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1353
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1354
			/* own orig still holds the old value. */
1355 1356
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
1357 1358
				continue;

1359 1360
			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
1361 1362 1363
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
1364
			batadv_bla_send_announce(bat_priv, backbone_gw);
1365 1366 1367 1368 1369
		}
		rcu_read_unlock();
	}
}

1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389
/**
 * batadv_bla_send_loopdetect - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the mesh, this
 * indicates that there is a loop.
 */
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}

1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406
/**
 * batadv_bla_status_update - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
1407
	batadv_hardif_put(primary_if);
1408 1409
}

1410 1411 1412 1413 1414
/**
 * batadv_bla_periodic_work - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
1415 1416 1417
 *  * purge structures when they are too old
 *  * send announcements
 */
1418
static void batadv_bla_periodic_work(struct work_struct *work)
1419
{
1420
	struct delayed_work *delayed_work;
1421
	struct batadv_priv *bat_priv;
1422
	struct batadv_priv_bla *priv_bla;
1423
	struct hlist_head *head;
1424
	struct batadv_bla_backbone_gw *backbone_gw;
1425
	struct batadv_hashtable *hash;
1426
	struct batadv_hard_iface *primary_if;
1427
	bool send_loopdetect = false;
1428 1429
	int i;

G
Geliang Tang 已提交
1430
	delayed_work = to_delayed_work(work);
1431 1432
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1433
	primary_if = batadv_primary_if_get_selected(bat_priv);
1434 1435 1436
	if (!primary_if)
		goto out;

1437 1438
	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);
1439 1440 1441 1442

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458
	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
		random_ether_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

1459
	hash = bat_priv->bla.backbone_hash;
1460 1461 1462 1463 1464 1465 1466
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1467
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1468 1469
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
1470 1471 1472 1473
				continue;

			backbone_gw->lasttime = jiffies;

1474
			batadv_bla_send_announce(bat_priv, backbone_gw);
1475 1476 1477
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);
1478 1479 1480 1481 1482

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
1483 1484 1485
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
1486 1487 1488 1489 1490
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

1491 1492 1493
			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

1494 1495
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
1496 1497 1498 1499 1500
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
1501
		batadv_hardif_put(primary_if);
1502

1503 1504
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1505 1506
}

1507 1508 1509 1510 1511
/* The hash for claim and backbone hash receive the same key because they
 * are getting initialized by hash_new with the same key. Reinitializing
 * them with to different keys to allow nested locking without generating
 * lockdep warnings
 */
1512 1513
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
1514

1515 1516 1517 1518 1519 1520
/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */
1521
int batadv_bla_init(struct batadv_priv *bat_priv)
1522
{
1523
	int i;
1524
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1525
	struct batadv_hard_iface *primary_if;
1526
	u16 crc;
1527
	unsigned long entrytime;
1528

1529 1530
	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

1531
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1532

1533
	/* setting claim destination address */
1534 1535
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
1536
	primary_if = batadv_primary_if_get_selected(bat_priv);
1537
	if (primary_if) {
1538 1539
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
1540
		batadv_hardif_put(primary_if);
1541
	} else {
1542
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
1543 1544
	}

1545
	/* initialize the duplicate list */
1546
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1547
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1548 1549
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;
1550

1551 1552 1553
	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

1554
	if (bat_priv->bla.claim_hash)
1555
		return 0;
1556

1557 1558
	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);
1559

1560
	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1561
		return -ENOMEM;
1562

1563
	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1564
				   &batadv_claim_hash_lock_class_key);
1565
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1566
				   &batadv_backbone_hash_lock_class_key);
1567

1568
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1569

1570 1571 1572 1573
	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1574
	return 0;
1575 1576
}

1577
/**
1578
 * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
1579
 * @bat_priv: the bat priv with all the soft interface information
1580
 * @skb: contains the bcast_packet to be checked
1581 1582 1583 1584 1585 1586 1587 1588 1589
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
1590
 *
1591
 * Return: true if a packet is in the duplicate list, false otherwise.
1592
 */
1593 1594
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
1595
{
1596
	int i, curr;
1597 1598
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
1599
	struct batadv_bcast_duplist_entry *entry;
1600
	bool ret = false;
1601

1602
	bcast_packet = (struct batadv_bcast_packet *)skb->data;
1603 1604

	/* calculate the crc ... */
1605
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1606

1607 1608
	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

1609
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1610 1611 1612
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];
1613 1614 1615 1616

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
1617 1618
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
1619 1620 1621 1622 1623
			break;

		if (entry->crc != crc)
			continue;

1624
		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1625 1626 1627
			continue;

		/* this entry seems to match: same crc, not too old,
1628
		 * and from another gw. therefore return true to forbid it.
1629
		 */
1630
		ret = true;
1631
		goto out;
1632
	}
1633
	/* not found, add a new entry (overwrite the oldest entry)
1634
	 * and allow it, its the first occurrence.
1635
	 */
1636
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1637
	curr %= BATADV_DUPLIST_SIZE;
1638
	entry = &bat_priv->bla.bcast_duplist[curr];
1639 1640
	entry->crc = crc;
	entry->entrytime = jiffies;
1641
	ether_addr_copy(entry->orig, bcast_packet->orig);
1642
	bat_priv->bla.bcast_duplist_curr = curr;
1643

1644 1645 1646 1647
out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
1648 1649
}

1650
/**
1651 1652
 * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
 *  the VLAN identified by vid.
1653
 * @bat_priv: the bat priv with all the soft interface information
1654
 * @orig: originator mac address
1655
 * @vid: VLAN identifier
1656
 *
1657
 * Return: true if orig is a backbone for this vid, false otherwise.
1658
 */
1659
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1660
				    unsigned short vid)
1661
{
1662
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1663
	struct hlist_head *head;
1664
	struct batadv_bla_backbone_gw *backbone_gw;
1665 1666 1667
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1668
		return false;
1669 1670

	if (!hash)
1671
		return false;
1672 1673 1674 1675 1676

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1677
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1678 1679
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
1680
				rcu_read_unlock();
1681
				return true;
1682 1683 1684 1685 1686
			}
		}
		rcu_read_unlock();
	}

1687
	return false;
1688 1689
}

/**
 * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: length of the batman-adv header preceding the inner ethernet
 *  header
 *
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
 */
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return false;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return false;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return false;

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

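/* Note: batadv_bla_is_backbone_gw() is a pure existence check, which is why
 * the reference returned by batadv_backbone_hash_find() is dropped again
 * immediately via batadv_backbone_gw_put().
 */
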
/**
 * batadv_bla_free - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_put(primary_if);
}

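/* Note on the purge calls above: the trailing 1 is the "now" argument of
 * batadv_bla_purge_claims() and batadv_bla_purge_backbone_gw(), which (as
 * used here) wipes the entries immediately instead of waiting for their
 * timeouts to expire.
 */
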
/**
 * batadv_bla_loopdetect_check - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us;
 * if that is the case, throw an uevent and log the event.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;

	ethhdr = eth_hdr(skb);

	/* Only check for the MAC address and skip more checks here for
	 * performance reasons - this function is on the hotpath, after all.
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* If the packet came too late, don't forward it on the mesh
	 * but don't consider that as loop. It might be a coincidence.
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

	queue_work(batadv_event_workqueue, &backbone_gw->report_work);
	/* backbone_gw is unreferenced in the report work function */

	return true;
}
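
/* Sketch of the loop detection scheme handled above: loopdetect frames
 * carrying bat_priv->bla.loopdetect_addr as their source are injected
 * periodically; if such a frame re-enters from the mesh within
 * BATADV_BLA_LOOPDETECT_TIMEOUT of the last probe, a loop through the local
 * bridge is assumed and reported asynchronously via report_work. Late
 * arrivals are still dropped, but not treated as a loop.
 */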

/**
 * batadv_bla_rx - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * batadv_bla_rx checks whether:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, bool is_bcast)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	kfree_skb(skb);
	ret = true;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}

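/* Caller sketch (illustrative): once batadv_bla_rx() returns true the skb
 * has already been consumed (claimed or freed above), so a receive path
 * must not touch it again, e.g.
 *
 *	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 *		return;
 */
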
/**
 * batadv_bla_tx - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function.
 *
 * This call might reallocate skb data.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	bool client_roamed;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;
handled:
	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}

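/* Caller sketch (illustrative): unlike batadv_bla_rx(), a true return from
 * batadv_bla_tx() does not free the skb; the transmit path is expected to
 * drop it itself, e.g.
 *
 *	if (batadv_bla_tx(bat_priv, skb, vid))
 *		goto dropped;
 *
 * where "dropped" is a hypothetical cleanup label in the caller.
 */
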
/**
 * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq,
		 "   Client               VID      Originator        [o] (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);
			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, BATADV_PRINT_VID(claim->vid),
				   backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   backbone_crc);

			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}

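/* Example of one claim line as rendered by the format string above (all
 * values invented purely for illustration):
 *
 *  * 02:ba:de:af:fe:01 on     1 by 02:ba:de:af:fe:02 [x] (0x7a3c)
 */
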
/**
 * batadv_bla_claim_dump_entry - dump one entry of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}
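
/* Note: batadv_bla_claim_dump_entry() opens a BATADV_CMD_GET_BLA_CLAIM
 * message, emits BATADV_ATTR_BLA_OWN only as a flag for claims held by this
 * backbone gateway and cancels the half-built message on any nla_put
 * failure so the dump can be resumed in a later message.
 */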

/**
 * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 if the bucket was dumped completely, a negative error code
 * otherwise.
 */
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
			     struct batadv_hard_iface *primary_if,
			     struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_claim *claim;
	int idx = 0;
	int ret = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
						  primary_if, claim);
		if (ret) {
			/* remember where this bucket was interrupted so the
			 * next dump call can resume right here
			 */
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	/* the whole bucket fit into the message, start the next one fresh */
	*idx_skip = 0;
unlock:
	rcu_read_unlock();
	return ret;
}

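/* Note: *idx_skip tracks how far into the bucket the dump got before the
 * message filled up; it is reset to 0 once a bucket has been dumped
 * completely so the next bucket starts from its first entry.
 */
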
/**
 * batadv_bla_claim_dump - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	struct hlist_head *head;
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		head = &hash->table[bucket];

		if (batadv_bla_claim_dump_bucket(msg, portid,
						 cb->nlh->nlmsg_seq,
						 primary_if, head, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}
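
/* Note: the dump state is kept in the netlink callback between calls:
 * cb->args[0] holds the next hash bucket and cb->args[1] the offset within
 * that bucket, so consecutive dump messages continue where the previous one
 * ran out of space.
 */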

/**
 * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
 *  file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   BATADV_PRINT_VID(backbone_gw->vid), secs,
				   msecs, backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}