// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2018  B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "bridge_loop_avoidance.h"
20
#include "main.h"
21

22 23 24
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
25
#include <linux/crc16.h>
26 27
#include <linux/errno.h>
#include <linux/etherdevice.h>
28
#include <linux/gfp.h>
29
#include <linux/if_arp.h>
30
#include <linux/if_ether.h>
31
#include <linux/if_vlan.h>
32 33 34
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
35
#include <linux/kref.h>
36 37 38
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
39
#include <linux/netlink.h>
40 41 42 43 44 45 46 47 48 49
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
50 51 52
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
53
#include <uapi/linux/batadv_packet.h>
54
#include <uapi/linux/batman_adv.h>
55 56 57

#include "hard-interface.h"
#include "hash.h"
58
#include "log.h"
59
#include "netlink.h"
60
#include "originator.h"
61
#include "soft-interface.h"
62
#include "sysfs.h"
63
#include "translation-table.h"
64

65
static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
66

67
static void batadv_bla_periodic_work(struct work_struct *work);
68 69 70
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);
71

72
/**
73
 * batadv_choose_claim() - choose the right bucket for a claim.
74 75
 * @data: data to hash
 * @size: size of the hash table
76
 *
77
 * Return: the hash index of the claim
78
 */
79
static inline u32 batadv_choose_claim(const void *data, u32 size)
80
{
81
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
82
	u32 hash = 0;
83

84 85
	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
86 87 88 89

	return hash % size;
}

90
/**
91
 * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
92 93
 * @data: data to hash
 * @size: size of the hash table
94
 *
95
 * Return: the hash index of the backbone gateway
96
 */
97
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
98
{
99
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
100
	u32 hash = 0;
101

102 103
	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
104 105 106 107

	return hash % size;
}

108
/**
109
 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
110 111 112
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
113
 * Return: true if the backbones have the same data, false otherwise
114
 */
115 116
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
117
{
118
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
119
					 hash_entry);
120 121
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;
122

123
	if (!batadv_compare_eth(gw1->orig, gw2->orig))
124
		return false;
125 126

	if (gw1->vid != gw2->vid)
127
		return false;
128

129
	return true;
130 131
}

132
/**
133
 * batadv_compare_claim() - compare address and vid of two claims
134 135 136
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claims
 *
137
 * Return: true if the claim have the same data, 0 otherwise
138
 */
139 140
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
141
{
142
	const void *data1 = container_of(node, struct batadv_bla_claim,
143
					 hash_entry);
144 145
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;
146 147

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
148
		return false;
149 150

	if (cl1->vid != cl2->vid)
151
		return false;
152

153
	return true;
154 155
}

156
/**
 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *gw;

	gw = container_of(ref, struct batadv_bla_backbone_gw, refcount);

	/* readers may still hold RCU references; defer the free */
	kfree_rcu(gw, rcu);
}

/**
172
 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
173
 *  release it
174 175
 * @backbone_gw: backbone gateway to be free'd
 */
176
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
177
{
178
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
179 180
}

181
/**
182 183
 * batadv_claim_release() - release claim from lists and queue for free after
 *  rcu grace period
184 185
 * @ref: kref pointer of the claim
 */
186
static void batadv_claim_release(struct kref *ref)
187
{
188
	struct batadv_bla_claim *claim;
189
	struct batadv_bla_backbone_gw *old_backbone_gw;
190 191 192

	claim = container_of(ref, struct batadv_bla_claim, refcount);

193 194 195 196 197 198 199 200 201 202 203
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

204
	kfree_rcu(claim, rcu);
205 206
}

207
/**
208
 * batadv_claim_put() - decrement the claim refcounter and possibly release it
209 210
 * @claim: claim to be free'd
 */
211
static void batadv_claim_put(struct batadv_bla_claim *claim)
212
{
213
	kref_put(&claim->refcount, batadv_claim_release);
214 215
}

216
/**
217
 * batadv_claim_hash_find() - looks for a claim in the claim hash
218
 * @bat_priv: the bat priv with all the soft interface information
219 220
 * @data: search data (may be local/static data)
 *
221
 * Return: claim if found or NULL otherwise.
222
 */
223 224 225
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
226
{
227
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
228
	struct hlist_head *head;
229 230
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
231 232 233 234 235
	int index;

	if (!hash)
		return NULL;

236
	index = batadv_choose_claim(data, hash->size);
237 238 239
	head = &hash->table[index];

	rcu_read_lock();
240
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
241
		if (!batadv_compare_claim(&claim->hash_entry, data))
242 243
			continue;

244
		if (!kref_get_unless_zero(&claim->refcount))
245 246 247 248 249 250 251 252 253 254
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

255
/**
256
 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
257
 * @bat_priv: the bat priv with all the soft interface information
258 259 260
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
261
 * Return: backbone gateway if found or NULL otherwise
262
 */
263
static struct batadv_bla_backbone_gw *
264 265
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
266
{
267
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
268
	struct hlist_head *head;
269 270
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
271 272 273 274 275
	int index;

	if (!hash)
		return NULL;

276
	ether_addr_copy(search_entry.orig, addr);
277 278
	search_entry.vid = vid;

279
	index = batadv_choose_backbone_gw(&search_entry, hash->size);
280 281 282
	head = &hash->table[index];

	rcu_read_lock();
283
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
284 285
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
286 287
			continue;

288
		if (!kref_get_unless_zero(&backbone_gw->refcount))
289 290 291 292 293 294 295 296 297 298
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

299
/**
300
 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
301 302
 * @backbone_gw: backbone gateway where the claims should be removed
 */
303
static void
304
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
305
{
306
	struct batadv_hashtable *hash;
307
	struct hlist_node *node_tmp;
308
	struct hlist_head *head;
309
	struct batadv_bla_claim *claim;
310 311 312
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

313
	hash = backbone_gw->bat_priv->bla.claim_hash;
314 315 316 317 318 319 320 321
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
322
		hlist_for_each_entry_safe(claim, node_tmp,
323 324 325 326
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

327
			batadv_claim_put(claim);
328
			hlist_del_rcu(&claim->hash_entry);
329 330 331 332
		}
		spin_unlock_bh(list_lock);
	}

333
	/* all claims gone, initialize CRC */
334
	spin_lock_bh(&backbone_gw->crc_lock);
335
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
336
	spin_unlock_bh(&backbone_gw->crc_lock);
337 338
}

339
/**
340
 * batadv_bla_send_claim() - sends a claim frame according to the provided info
341
 * @bat_priv: the bat priv with all the soft interface information
342
 * @mac: the mac address to be announced within the claim
343 344 345
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
346
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
347
				  unsigned short vid, int claimtype)
348 349 350
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
351
	struct batadv_hard_iface *primary_if;
352
	struct net_device *soft_iface;
353
	u8 *hw_src;
354
	struct batadv_bla_claim_dst local_claim_dest;
355
	__be32 zeroip = 0;
356

357
	primary_if = batadv_primary_if_get_selected(bat_priv);
358 359 360
	if (!primary_if)
		return;

361
	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
362
	       sizeof(local_claim_dest));
363 364 365 366 367 368 369 370 371 372 373 374 375 376
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC:  originator mac */
			 primary_if->net_dev->dev_addr,
377
			 /* HW DST: FF:43:05:XX:YY:YY
378
			  * with XX   = claim type
379
			  * and YY:YY = group id
380
			  */
381
			 (u8 *)&local_claim_dest);
382 383 384 385 386

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
387
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
388 389 390

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
391
	case BATADV_CLAIM_TYPE_CLAIM:
392 393 394
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
395
		ether_addr_copy(ethhdr->h_source, mac);
396
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
397
			   "%s(): CLAIM %pM on vid %d\n", __func__, mac,
398
			   batadv_print_vid(vid));
399
		break;
400
	case BATADV_CLAIM_TYPE_UNCLAIM:
401 402 403
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
404
		ether_addr_copy(hw_src, mac);
405
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
406
			   "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
407
			   batadv_print_vid(vid));
408
		break;
409
	case BATADV_CLAIM_TYPE_ANNOUNCE:
410 411 412
		/* announcement frame
		 * set HW SRC to the special mac containg the crc
		 */
413
		ether_addr_copy(hw_src, mac);
414
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
415
			   "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
416
			   ethhdr->h_source, batadv_print_vid(vid));
417
		break;
418
	case BATADV_CLAIM_TYPE_REQUEST:
419
		/* request frame
420 421
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
422
		 */
423 424
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
425
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
426
			   "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
427
			   ethhdr->h_source, ethhdr->h_dest,
428
			   batadv_print_vid(vid));
429
		break;
430 431 432
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
433 434
			   "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
			   __func__, ethhdr->h_source, ethhdr->h_dest,
435
			   batadv_print_vid(vid));
436 437

		break;
438 439
	}

440
	if (vid & BATADV_VLAN_HAS_TAG) {
441 442
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
443 444 445
		if (!skb)
			goto out;
	}
446 447 448

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
449 450 451
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
452 453 454 455

	netif_rx(skb);
out:
	if (primary_if)
456
		batadv_hardif_put(primary_if);
457 458
}

459
/**
 * batadv_bla_loopdetect_report() - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    batadv_print_vid(backbone_gw->vid));

	/* render the vid as a string for the uevent payload */
	snprintf(vid_str, sizeof(vid_str), "%d",
		 batadv_print_vid(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	/* drop the reference taken when the work item was queued */
	batadv_backbone_gw_put(backbone_gw);
}

489
/**
490
 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
491
 * @bat_priv: the bat priv with all the soft interface information
492 493
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
494
 * @own_backbone: set if the requested backbone is local
495
 *
496
 * Return: the (possibly created) backbone gateway or NULL on error
497
 */
498
static struct batadv_bla_backbone_gw *
499
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
500
			   unsigned short vid, bool own_backbone)
501
{
502
	struct batadv_bla_backbone_gw *entry;
503
	struct batadv_orig_node *orig_node;
504 505
	int hash_added;

506
	entry = batadv_backbone_hash_find(bat_priv, orig, vid);
507 508 509 510

	if (entry)
		return entry;

511
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
512
		   "%s(): not found (%pM, %d), creating new entry\n", __func__,
513
		   orig, batadv_print_vid(vid));
514 515 516 517 518 519 520

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
521
	entry->crc = BATADV_BLA_CRC_INIT;
522
	entry->bat_priv = bat_priv;
523
	spin_lock_init(&entry->crc_lock);
524
	atomic_set(&entry->request_sent, 0);
525
	atomic_set(&entry->wait_periods, 0);
526
	ether_addr_copy(entry->orig, orig);
527
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
528
	kref_init(&entry->refcount);
529

530
	kref_get(&entry->refcount);
531
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
532 533 534
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);
535 536 537 538 539 540 541

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

542
	/* this is a gateway now, remove any TT entry on this VLAN */
543
	orig_node = batadv_orig_hash_find(bat_priv, orig);
544
	if (orig_node) {
545
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
546
					  "became a backbone gateway");
547
		batadv_orig_node_put(orig_node);
548
	}
549

550
	if (own_backbone) {
551 552
		batadv_bla_send_announce(bat_priv, entry);

553 554
		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
555
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
556 557 558
		atomic_inc(&bat_priv->bla.num_requests);
	}

559 560 561
	return entry;
}

562
/**
563
 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
564 565 566 567 568
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
569 570
 * where we receive other backbone gws
 */
571 572 573
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
574
				  unsigned short vid)
575
{
576
	struct batadv_bla_backbone_gw *backbone_gw;
577

578 579
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
580
						 vid, true);
581 582 583 584
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
585
	batadv_backbone_gw_put(backbone_gw);
586 587
}

588
/**
589
 * batadv_bla_answer_request() - answer a bla request by sending own claims
590
 * @bat_priv: the bat priv with all the soft interface information
591
 * @primary_if: interface where the request came on
592 593 594 595 596
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
597 598
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
599
				      unsigned short vid)
600 601
{
	struct hlist_head *head;
602
	struct batadv_hashtable *hash;
603
	struct batadv_bla_claim *claim;
604
	struct batadv_bla_backbone_gw *backbone_gw;
605 606
	int i;

607
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
608 609
		   "%s(): received a claim request, send all of our own claims again\n",
		   __func__);
610

611 612 613
	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
614 615 616
	if (!backbone_gw)
		return;

617
	hash = bat_priv->bla.claim_hash;
618 619 620 621
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
622
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
623 624 625 626
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

627
			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
628
					      BATADV_CLAIM_TYPE_CLAIM);
629 630 631 632 633
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
634
	batadv_bla_send_announce(bat_priv, backbone_gw);
635
	batadv_backbone_gw_put(backbone_gw);
636 637
}

638
/**
639
 * batadv_bla_send_request() - send a request to repeat claims
640
 * @backbone_gw: the backbone gateway from whom we are out of sync
641 642 643 644 645
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */
646
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
647 648
{
	/* first, remove all old entries */
649
	batadv_bla_del_backbone_claims(backbone_gw);
650

651 652
	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);
653 654

	/* send request */
655
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
656
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
657 658 659

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
660
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
661 662 663 664
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

665
/**
666
 * batadv_bla_send_announce() - Send an announcement frame
667
 * @bat_priv: the bat priv with all the soft interface information
668 669
 * @backbone_gw: our backbone gateway which should be announced
 */
670
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
671
				     struct batadv_bla_backbone_gw *backbone_gw)
672
{
673
	u8 mac[ETH_ALEN];
674
	__be16 crc;
675

676
	memcpy(mac, batadv_announce_mac, 4);
677
	spin_lock_bh(&backbone_gw->crc_lock);
678
	crc = htons(backbone_gw->crc);
679
	spin_unlock_bh(&backbone_gw->crc_lock);
680
	memcpy(&mac[4], &crc, 2);
681

682
	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
683
			      BATADV_CLAIM_TYPE_ANNOUNCE);
684 685
}

686
/**
687
 * batadv_bla_add_claim() - Adds a claim in the claim hash
688
 * @bat_priv: the bat priv with all the soft interface information
689 690 691 692
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
693
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
694
				 const u8 *mac, const unsigned short vid,
695
				 struct batadv_bla_backbone_gw *backbone_gw)
696
{
697
	struct batadv_bla_backbone_gw *old_backbone_gw;
698 699
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
700
	bool remove_crc = false;
701 702
	int hash_added;

703
	ether_addr_copy(search_claim.addr, mac);
704
	search_claim.vid = vid;
705
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
706 707 708 709 710 711 712

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

713
		ether_addr_copy(claim->addr, mac);
714
		spin_lock_init(&claim->backbone_lock);
715 716
		claim->vid = vid;
		claim->lasttime = jiffies;
717
		kref_get(&backbone_gw->refcount);
718
		claim->backbone_gw = backbone_gw;
719
		kref_init(&claim->refcount);
720

721
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
722 723
			   "%s(): adding new entry %pM, vid %d to hash ...\n",
			   __func__, mac, batadv_print_vid(vid));
724 725

		kref_get(&claim->refcount);
726
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
727 728 729
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);
730 731 732 733 734 735 736 737 738 739 740 741

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

742
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
743 744 745
			   "%s(): changing ownership for %pM, vid %d to gw %pM\n",
			   __func__, mac, batadv_print_vid(vid),
			   backbone_gw->orig);
746

747
		remove_crc = true;
748
	}
749 750 751 752

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
753
	kref_get(&backbone_gw->refcount);
754
	claim->backbone_gw = backbone_gw;
755
	spin_unlock_bh(&claim->backbone_lock);
756

757 758 759 760 761 762
	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}
763

764 765 766
	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
767
	spin_lock_bh(&backbone_gw->crc_lock);
768
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
769
	spin_unlock_bh(&backbone_gw->crc_lock);
770 771 772
	backbone_gw->lasttime = jiffies;

claim_free_ref:
773
	batadv_claim_put(claim);
774 775
}

776
/**
 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *gw;

	/* take the reference under the lock so the pointer cannot be
	 * swapped out between read and kref_get
	 */
	spin_lock_bh(&claim->backbone_lock);
	gw = claim->backbone_gw;
	kref_get(&gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return gw;
}

796
/**
797
 * batadv_bla_del_claim() - delete a claim from the claim hash
798 799 800
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
801
 */
802
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
803
				 const u8 *mac, const unsigned short vid)
804
{
805
	struct batadv_bla_claim search_claim, *claim;
806

807
	ether_addr_copy(search_claim.addr, mac);
808
	search_claim.vid = vid;
809
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
810 811 812
	if (!claim)
		return;

813
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
814
		   mac, batadv_print_vid(vid));
815

816
	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
817
			   batadv_choose_claim, claim);
818
	batadv_claim_put(claim); /* reference from the hash is gone */
819 820

	/* don't need the reference from hash_find() anymore */
821
	batadv_claim_put(claim);
822 823
}

824
/**
825
 * batadv_handle_announce() - check for ANNOUNCE frame
826 827 828 829
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
830
 *
831
 * Return: true if handled
832
 */
833 834
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
835
{
836
	struct batadv_bla_backbone_gw *backbone_gw;
837
	u16 backbone_crc, crc;
838

839
	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
840
		return false;
841

842 843
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);
844 845

	if (unlikely(!backbone_gw))
846
		return true;
847 848 849

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
850
	crc = ntohs(*((__be16 *)(&an_addr[4])));
851

852
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
853 854
		   "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
855

856 857 858 859 860
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
861
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
862 863
			   "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   __func__, backbone_gw->orig,
864
			   batadv_print_vid(backbone_gw->vid),
865
			   backbone_crc, crc);
866

867
		batadv_bla_send_request(backbone_gw);
868 869 870 871 872
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
873
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
874 875 876 877
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

878
	batadv_backbone_gw_put(backbone_gw);
879
	return true;
880 881
}

882
/**
883
 * batadv_handle_request() - check for REQUEST frame
884 885 886 887 888
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
889
 *
890
 * Return: true if handled
891
 */
892 893 894 895
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
896 897
{
	/* check for REQUEST frame */
898
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
899
		return false;
900 901 902 903

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
904
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
905
		return true;
906

907
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
908 909
		   "%s(): REQUEST vid %d (sent by %pM)...\n",
		   __func__, batadv_print_vid(vid), ethhdr->h_source);
910

911
	batadv_bla_answer_request(bat_priv, primary_if, vid);
912
	return true;
913 914
}

915
/**
916
 * batadv_handle_unclaim() - check for UNCLAIM frame
917 918 919 920 921
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
922
 *
923
 * Return: true if handled
924
 */
925 926 927 928
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
929
{
930
	struct batadv_bla_backbone_gw *backbone_gw;
931 932

	/* unclaim in any case if it is our own */
933 934
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
935
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
936
				      BATADV_CLAIM_TYPE_UNCLAIM);
937

938
	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
939 940

	if (!backbone_gw)
941
		return true;
942 943

	/* this must be an UNCLAIM frame */
944
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
945
		   "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
946
		   claim_addr, batadv_print_vid(vid), backbone_gw->orig);
947

948
	batadv_bla_del_claim(bat_priv, claim_addr, vid);
949
	batadv_backbone_gw_put(backbone_gw);
950
	return true;
951 952
}

953
/**
954
 * batadv_handle_claim() - check for CLAIM frame
955 956 957 958 959
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
960
 *
961
 * Return: true if handled
962
 */
963 964 965 966
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
967
{
968
	struct batadv_bla_backbone_gw *backbone_gw;
969 970 971

	/* register the gateway if not yet available, and add the claim. */

972 973
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);
974 975

	if (unlikely(!backbone_gw))
976
		return true;
977 978

	/* this must be a CLAIM frame */
979
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
980
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
981
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
982
				      BATADV_CLAIM_TYPE_CLAIM);
983 984 985

	/* TODO: we could call something like tt_local_del() here. */

986
	batadv_backbone_gw_put(backbone_gw);
987
	return true;
988 989
}

990
/**
991
 * batadv_check_claim_group() - check for claim group membership
992
 * @bat_priv: the bat priv with all the soft interface information
993
 * @primary_if: the primary interface of this batman interface
994 995 996 997 998 999 1000 1001
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if its on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
1002
 * Return:
1003 1004 1005 1006
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */
1007 1008
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
1009
				    u8 *hw_src, u8 *hw_dst,
1010
				    struct ethhdr *ethhdr)
1011
{
1012
	u8 *backbone_addr;
1013
	struct batadv_orig_node *orig_node;
1014
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1015

1016
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1017
	bla_dst_own = &bat_priv->bla.claim_dest;
1018 1019 1020 1021 1022

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
1023
	case BATADV_CLAIM_TYPE_CLAIM:
1024 1025
		backbone_addr = hw_src;
		break;
1026 1027
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
1028
	case BATADV_CLAIM_TYPE_UNCLAIM:
1029 1030 1031 1032 1033 1034 1035
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
1036
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1037 1038 1039 1040 1041 1042 1043
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
1044
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1045 1046 1047 1048 1049 1050 1051 1052 1053

	/* dont accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1054
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1055
			   "taking other backbones claim group: %#.4x\n",
1056
			   ntohs(bla_dst->group));
1057 1058 1059
		bla_dst_own->group = bla_dst->group;
	}

1060
	batadv_orig_node_put(orig_node);
1061 1062 1063 1064

	return 2;
}

1065
/**
1066
 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
1067
 * @bat_priv: the bat priv with all the soft interface information
1068
 * @primary_if: the primary hard interface of this batman soft interface
1069 1070
 * @skb: the frame to be checked
 *
1071
 * Return: true if it was a claim frame, otherwise return false to
1072 1073
 * tell the callee that it can use the frame on its own.
 */
1074 1075 1076
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
1077
{
1078
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1079
	u8 *hw_src, *hw_dst;
1080
	struct vlan_hdr *vhdr, vhdr_buf;
1081
	struct ethhdr *ethhdr;
1082
	struct arphdr *arphdr;
1083
	unsigned short vid;
1084
	int vlan_depth = 0;
1085
	__be16 proto;
1086
	int headlen;
1087
	int ret;
1088

1089
	vid = batadv_get_vid(skb, 0);
1090
	ethhdr = eth_hdr(skb);
1091

1092 1093 1094
	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
1107
				return false;
1108 1109 1110 1111 1112

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
1113 1114
	}

1115
	if (proto != htons(ETH_P_ARP))
1116
		return false; /* not a claim frame */
1117 1118 1119 1120

	/* this must be a ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1121
		return false;
1122 1123

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
1124
	ethhdr = eth_hdr(skb);
1125
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1126 1127 1128 1129 1130

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1131
		return false;
1132
	if (arphdr->ar_pro != htons(ETH_P_IP))
1133
		return false;
1134
	if (arphdr->ar_hln != ETH_ALEN)
1135
		return false;
1136
	if (arphdr->ar_pln != 4)
1137
		return false;
1138

1139
	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1140
	hw_dst = hw_src + ETH_ALEN + 4;
1141
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1142 1143 1144 1145 1146
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
1147
		return false;
1148 1149 1150 1151 1152 1153

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
1154
		return true;
1155

1156 1157
	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1158
		return false;
1159

1160
	/* check if it is a claim frame. */
1161 1162
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
1163
	if (ret == 1)
1164
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1165 1166 1167
			   "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   __func__, ethhdr->h_source, batadv_print_vid(vid),
			   hw_src, hw_dst);
1168 1169

	if (ret < 2)
1170
		return !!ret;
1171 1172

	/* become a backbone gw ourselves on this vlan if not happened yet */
1173
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1174 1175 1176

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
1177
	case BATADV_CLAIM_TYPE_CLAIM:
1178 1179
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
1180
			return true;
1181
		break;
1182
	case BATADV_CLAIM_TYPE_UNCLAIM:
1183 1184
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
1185
			return true;
1186 1187
		break;

1188
	case BATADV_CLAIM_TYPE_ANNOUNCE:
1189 1190
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
1191
			return true;
1192
		break;
1193
	case BATADV_CLAIM_TYPE_REQUEST:
1194 1195
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
1196
			return true;
1197 1198 1199
		break;
	}

1200
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
1201 1202 1203
		   "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
		   hw_dst);
1204
	return true;
1205 1206
}

1207
/**
1208
 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
1209 1210 1211 1212 1213
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
1214 1215
 * a time out, or clean all backbone gws if now is set.
 */
1216
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1217
{
1218
	struct batadv_bla_backbone_gw *backbone_gw;
1219
	struct hlist_node *node_tmp;
1220
	struct hlist_head *head;
1221
	struct batadv_hashtable *hash;
1222 1223 1224
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

1225
	hash = bat_priv->bla.backbone_hash;
1226 1227 1228 1229 1230 1231 1232 1233
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
1234
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
1235 1236 1237
					  head, hash_entry) {
			if (now)
				goto purge_now;
1238
			if (!batadv_has_timed_out(backbone_gw->lasttime,
1239
						  BATADV_BLA_BACKBONE_TIMEOUT))
1240 1241
				continue;

1242
			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1243 1244
				   "%s(): backbone gw %pM timed out\n",
				   __func__, backbone_gw->orig);
1245 1246 1247 1248

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
1249
				atomic_dec(&bat_priv->bla.num_requests);
1250

1251
			batadv_bla_del_backbone_claims(backbone_gw);
1252

1253
			hlist_del_rcu(&backbone_gw->hash_entry);
1254
			batadv_backbone_gw_put(backbone_gw);
1255 1256 1257 1258 1259
		}
		spin_unlock_bh(list_lock);
	}
}

1260
/**
1261
 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
1262
 * @bat_priv: the bat priv with all the soft interface information
1263 1264 1265 1266 1267 1268
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set
 */
1269 1270 1271
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
1272
{
1273
	struct batadv_bla_backbone_gw *backbone_gw;
1274
	struct batadv_bla_claim *claim;
1275
	struct hlist_head *head;
1276
	struct batadv_hashtable *hash;
1277 1278
	int i;

1279
	hash = bat_priv->bla.claim_hash;
1280 1281 1282 1283 1284 1285 1286
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1287
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
1288
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1289 1290
			if (now)
				goto purge_now;
1291 1292

			if (!batadv_compare_eth(backbone_gw->orig,
1293
						primary_if->net_dev->dev_addr))
1294 1295
				goto skip;

1296
			if (!batadv_has_timed_out(claim->lasttime,
1297
						  BATADV_BLA_CLAIM_TIMEOUT))
1298
				goto skip;
1299

1300
			batadv_dbg(BATADV_DBG_BLA, bat_priv,
1301
				   "%s(): timed out.\n", __func__);
1302 1303

purge_now:
1304
			batadv_dbg(BATADV_DBG_BLA, bat_priv,
1305
				   "%s(): %pM, vid %d\n", __func__,
1306 1307
				   claim->addr, claim->vid);

1308
			batadv_handle_unclaim(bat_priv, primary_if,
1309
					      backbone_gw->orig,
1310
					      claim->addr, claim->vid);
1311 1312
skip:
			batadv_backbone_gw_put(backbone_gw);
1313 1314 1315 1316 1317
		}
		rcu_read_unlock();
	}
}

1318
/**
1319
 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
1320
 *  originator address changes
1321
 * @bat_priv: the bat priv with all the soft interface information
1322 1323 1324
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */
1325 1326 1327
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
1328
{
1329
	struct batadv_bla_backbone_gw *backbone_gw;
1330
	struct hlist_head *head;
1331
	struct batadv_hashtable *hash;
1332
	__be16 group;
1333 1334
	int i;

1335
	/* reset bridge loop avoidance group id */
1336 1337
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;
1338

1339 1340 1341 1342
	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

1343
	if (!oldif) {
1344 1345
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
1346 1347 1348
		return;
	}

1349
	hash = bat_priv->bla.backbone_hash;
1350 1351 1352 1353 1354 1355 1356
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1357
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1358
			/* own orig still holds the old value. */
1359 1360
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
1361 1362
				continue;

1363 1364
			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
1365 1366 1367
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
1368
			batadv_bla_send_announce(bat_priv, backbone_gw);
1369 1370 1371 1372 1373
		}
		rcu_read_unlock();
	}
}

1374
/**
1375
 * batadv_bla_send_loopdetect() - send a loopdetect frame
1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the mesh, this
 * indicates that there is a loop.
 */
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}

/**
 * batadv_bla_status_update() - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}

1414
/**
1415
 * batadv_bla_periodic_work() - performs periodic bla work
1416 1417 1418
 * @work: kernel work struct
 *
 * periodic work to do:
1419 1420 1421
 *  * purge structures when they are too old
 *  * send announcements
 */
1422
static void batadv_bla_periodic_work(struct work_struct *work)
1423
{
1424
	struct delayed_work *delayed_work;
1425
	struct batadv_priv *bat_priv;
1426
	struct batadv_priv_bla *priv_bla;
1427
	struct hlist_head *head;
1428
	struct batadv_bla_backbone_gw *backbone_gw;
1429
	struct batadv_hashtable *hash;
1430
	struct batadv_hard_iface *primary_if;
1431
	bool send_loopdetect = false;
1432 1433
	int i;

G
Geliang Tang 已提交
1434
	delayed_work = to_delayed_work(work);
1435 1436
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1437
	primary_if = batadv_primary_if_get_selected(bat_priv);
1438 1439 1440
	if (!primary_if)
		goto out;

1441 1442
	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);
1443 1444 1445 1446

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

1447 1448 1449 1450 1451
	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
1452
		eth_random_addr(bat_priv->bla.loopdetect_addr);
1453 1454 1455 1456 1457 1458 1459 1460 1461 1462
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

1463
	hash = bat_priv->bla.backbone_hash;
1464 1465 1466 1467 1468 1469 1470
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1471
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1472 1473
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
1474 1475 1476 1477
				continue;

			backbone_gw->lasttime = jiffies;

1478
			batadv_bla_send_announce(bat_priv, backbone_gw);
1479 1480 1481
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);
1482 1483 1484 1485 1486

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
1487 1488 1489
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
1490 1491 1492 1493 1494
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

1495 1496 1497
			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

1498 1499
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
1500 1501 1502 1503 1504
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
1505
		batadv_hardif_put(primary_if);
1506

1507 1508
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1509 1510
}

1511 1512 1513 1514 1515
/* The hash for claim and backbone hash receive the same key because they
 * are getting initialized by hash_new with the same key. Reinitializing
 * them with to different keys to allow nested locking without generating
 * lockdep warnings
 */
1516 1517
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
1518

1519
/**
1520
 * batadv_bla_init() - initialize all bla structures
1521 1522 1523 1524
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */
1525
int batadv_bla_init(struct batadv_priv *bat_priv)
1526
{
1527
	int i;
1528
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1529
	struct batadv_hard_iface *primary_if;
1530
	u16 crc;
1531
	unsigned long entrytime;
1532

1533 1534
	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

1535
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1536

1537
	/* setting claim destination address */
1538 1539
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
1540
	primary_if = batadv_primary_if_get_selected(bat_priv);
1541
	if (primary_if) {
1542 1543
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
1544
		batadv_hardif_put(primary_if);
1545
	} else {
1546
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
1547 1548
	}

1549
	/* initialize the duplicate list */
1550
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1551
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1552 1553
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;
1554

1555 1556 1557
	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

1558
	if (bat_priv->bla.claim_hash)
1559
		return 0;
1560

1561 1562
	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);
1563

1564
	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1565
		return -ENOMEM;
1566

1567
	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1568
				   &batadv_claim_hash_lock_class_key);
1569
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1570
				   &batadv_backbone_hash_lock_class_key);
1571

1572
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1573

1574 1575 1576 1577
	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1578
	return 0;
1579 1580
}

1581
/**
1582
 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
1583
 * @bat_priv: the bat priv with all the soft interface information
1584
 * @skb: contains the bcast_packet to be checked
1585 1586 1587 1588 1589 1590 1591 1592 1593
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
1594
 *
1595
 * Return: true if a packet is in the duplicate list, false otherwise.
1596
 */
1597 1598
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
1599
{
1600
	int i, curr;
1601 1602
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
1603
	struct batadv_bcast_duplist_entry *entry;
1604
	bool ret = false;
1605

1606
	bcast_packet = (struct batadv_bcast_packet *)skb->data;
1607 1608

	/* calculate the crc ... */
1609
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1610

1611 1612
	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

1613
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1614 1615 1616
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];
1617 1618 1619 1620

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
1621 1622
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
1623 1624 1625 1626 1627
			break;

		if (entry->crc != crc)
			continue;

1628
		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1629 1630 1631
			continue;

		/* this entry seems to match: same crc, not too old,
1632
		 * and from another gw. therefore return true to forbid it.
1633
		 */
1634
		ret = true;
1635
		goto out;
1636
	}
1637
	/* not found, add a new entry (overwrite the oldest entry)
1638
	 * and allow it, its the first occurrence.
1639
	 */
1640
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1641
	curr %= BATADV_DUPLIST_SIZE;
1642
	entry = &bat_priv->bla.bcast_duplist[curr];
1643 1644
	entry->crc = crc;
	entry->entrytime = jiffies;
1645
	ether_addr_copy(entry->orig, bcast_packet->orig);
1646
	bat_priv->bla.bcast_duplist_curr = curr;
1647

1648 1649 1650 1651
out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
1652 1653
}

1654
/**
1655
 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
1656
 *  the VLAN identified by vid.
1657
 * @bat_priv: the bat priv with all the soft interface information
1658
 * @orig: originator mac address
1659
 * @vid: VLAN identifier
1660
 *
1661
 * Return: true if orig is a backbone for this vid, false otherwise.
1662
 */
1663
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1664
				    unsigned short vid)
1665
{
1666
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1667
	struct hlist_head *head;
1668
	struct batadv_bla_backbone_gw *backbone_gw;
1669 1670 1671
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1672
		return false;
1673 1674

	if (!hash)
1675
		return false;
1676 1677 1678 1679 1680

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1681
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1682 1683
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
1684
				rcu_read_unlock();
1685
				return true;
1686 1687 1688 1689 1690
			}
		}
		rcu_read_unlock();
	}

1691
	return false;
1692 1693
}

1694
/**
1695
 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
1696
 * @skb: the frame to be checked
1697 1698 1699
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
1700 1701
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
1702
 */
1703 1704
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
1705
{
1706
	struct batadv_bla_backbone_gw *backbone_gw;
1707
	unsigned short vid;
1708 1709

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1710
		return false;
1711 1712

	/* first, find out the vid. */
1713
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1714
		return false;
1715

1716
	vid = batadv_get_vid(skb, hdr_size);
1717 1718

	/* see if this originator is a backbone gw for this VLAN */
1719 1720
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
1721
	if (!backbone_gw)
1722
		return false;
1723

1724
	batadv_backbone_gw_put(backbone_gw);
1725
	return true;
1726 1727
}

1728
/**
1729
 * batadv_bla_free() - free all bla structures
1730 1731 1732 1733
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */
1734
void batadv_bla_free(struct batadv_priv *bat_priv)
1735
{
1736
	struct batadv_hard_iface *primary_if;
1737

1738
	cancel_delayed_work_sync(&bat_priv->bla.work);
1739
	primary_if = batadv_primary_if_get_selected(bat_priv);
1740

1741
	if (bat_priv->bla.claim_hash) {
1742
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
1743 1744
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
1745
	}
1746
	if (bat_priv->bla.backbone_hash) {
1747
		batadv_bla_purge_backbone_gw(bat_priv, 1);
1748 1749
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
1750 1751
	}
	if (primary_if)
1752
		batadv_hardif_put(primary_if);
1753 1754
}

1755
/**
1756
 * batadv_bla_loopdetect_check() - check and handle a detected loop
1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us,
 * throw an uevent and log the event if that is the case.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
1775
	bool ret;
1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798

	ethhdr = eth_hdr(skb);

	/* Only check for the MAC address and skip more checks here for
	 * performance reasons - this function is on the hotpath, after all.
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* If the packet came too late, don't forward it on the mesh
	 * but don't consider that as loop. It might be a coincidence.
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

1799 1800 1801 1802 1803 1804 1805
	ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);

	/* backbone_gw is unreferenced in the report work function function
	 * if queue_work() call was successful
	 */
	if (!ret)
		batadv_backbone_gw_put(backbone_gw);
1806 1807 1808 1809

	return true;
}

1810
/**
1811
 * batadv_bla_rx() - check packets coming from the mesh.
1812
 * @bat_priv: the bat priv with all the soft interface information
1813 1814
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
1815
 * @is_bcast: the packet came in a broadcast packet type.
1816
 *
1817
 * batadv_bla_rx avoidance checks if:
1818 1819 1820
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
1821 1822
 * in these cases, the skb is further handled by this function
 *
1823 1824
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
1825
 */
1826 1827
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, bool is_bcast)
1828
{
1829
	struct batadv_bla_backbone_gw *backbone_gw;
1830
	struct ethhdr *ethhdr;
1831
	struct batadv_bla_claim search_claim, *claim = NULL;
1832
	struct batadv_hard_iface *primary_if;
1833
	bool own_claim;
1834
	bool ret;
1835

1836
	ethhdr = eth_hdr(skb);
1837

1838
	primary_if = batadv_primary_if_get_selected(bat_priv);
1839 1840 1841 1842 1843 1844
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

1845 1846 1847
	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

1848
	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1849
		/* don't allow broadcasts while requests are in flight */
1850
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1851 1852
			goto handled;

1853
	ether_addr_copy(search_claim.addr, ethhdr->h_source);
1854
	search_claim.vid = vid;
1855
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
1856 1857 1858 1859 1860

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
1861 1862

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1863 1864
			   "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
			   __func__, ethhdr->h_source,
1865 1866 1867
			   batadv_is_my_client(bat_priv,
					       ethhdr->h_source, vid) ?
			   "yes" : "no");
1868 1869 1870
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
1871 1872 1873 1874
		goto allow;
	}

	/* if it is our own claim ... */
1875 1876 1877 1878 1879 1880
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
1881 1882 1883 1884 1885 1886
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
1887 1888 1889 1890 1891 1892 1893
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
1894 1895 1896 1897 1898 1899
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
1900 1901 1902
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
1903 1904 1905
		goto allow;
	}
allow:
1906
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1907
	ret = false;
1908 1909 1910 1911
	goto out;

handled:
	kfree_skb(skb);
1912
	ret = true;
1913 1914 1915

out:
	if (primary_if)
1916
		batadv_hardif_put(primary_if);
1917
	if (claim)
1918
		batadv_claim_put(claim);
1919 1920 1921
	return ret;
}

1922
/**
1923
 * batadv_bla_tx() - check packets going into the mesh
1924
 * @bat_priv: the bat priv with all the soft interface information
1925 1926 1927
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
1928
 * batadv_bla_tx checks if:
1929 1930 1931
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
1932
 * in these cases, the skb is further handled by this function.
1933 1934
 *
 * This call might reallocate skb data.
1935
 *
1936 1937
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
1938
 */
1939 1940
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
1941 1942
{
	struct ethhdr *ethhdr;
1943
	struct batadv_bla_claim search_claim, *claim = NULL;
1944
	struct batadv_bla_backbone_gw *backbone_gw;
1945
	struct batadv_hard_iface *primary_if;
1946
	bool client_roamed;
1947
	bool ret = false;
1948

1949
	primary_if = batadv_primary_if_get_selected(bat_priv);
1950 1951 1952 1953 1954 1955
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

1956
	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1957 1958
		goto handled;

1959
	ethhdr = eth_hdr(skb);
1960

1961
	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1962 1963 1964 1965
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

1966
	ether_addr_copy(search_claim.addr, ethhdr->h_source);
1967 1968
	search_claim.vid = vid;

1969
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
1970 1971 1972 1973 1974 1975

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
1976 1977 1978 1979 1980 1981
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
1982 1983 1984
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
1985 1986 1987 1988 1989
		if (batadv_has_timed_out(claim->lasttime, 100)) {
			/* only unclaim if the last claim entry is
			 * older than 100 ms to make sure we really
			 * have a roaming client here.
			 */
1990 1991
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
				   __func__, ethhdr->h_source);
1992 1993 1994 1995 1996
			batadv_handle_unclaim(bat_priv, primary_if,
					      primary_if->net_dev->dev_addr,
					      ethhdr->h_source, vid);
			goto allow;
		} else {
1997 1998
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
				   __func__, ethhdr->h_source);
1999 2000
			goto handled;
		}
2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
2016
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
2017
	ret = false;
2018 2019
	goto out;
handled:
2020
	ret = true;
2021 2022
out:
	if (primary_if)
2023
		batadv_hardif_put(primary_if);
2024
	if (claim)
2025
		batadv_claim_put(claim);
2026 2027
	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq,
		 "   Client               VID      Originator        [o] (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);

			/* crc is updated concurrently - take a snapshot */
			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);
			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, batadv_print_vid(claim->vid),
				   backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   backbone_crc);

			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
#endif

/**
2093
 * batadv_bla_claim_dump_entry() - dump one entry of the claim table
2094 2095 2096
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
2097
 * @cb: Control block containing additional options
2098 2099 2100 2101 2102 2103
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */
static int
2104 2105
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
			    struct netlink_callback *cb,
2106 2107 2108 2109 2110 2111 2112 2113 2114
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

2115 2116 2117
	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_BLA_CLAIM);
2118 2119 2120 2121 2122
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

2123 2124
	genl_dump_check_consistent(cb, hdr);

2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155
	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
2156
 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
2157 2158 2159
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
2160
 * @cb: Control block containing additional options
2161
 * @primary_if: primary interface
2162 2163
 * @hash: hash to dump
 * @bucket: bucket index to dump
2164 2165 2166 2167 2168
 * @idx_skip: How many entries to skip
 *
 * Return: always 0.
 */
static int
2169 2170
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
			     struct netlink_callback *cb,
2171
			     struct batadv_hard_iface *primary_if,
2172 2173
			     struct batadv_hashtable *hash, unsigned int bucket,
			     int *idx_skip)
2174 2175 2176
{
	struct batadv_bla_claim *claim;
	int idx = 0;
2177
	int ret = 0;
2178

2179 2180 2181 2182
	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
2183 2184
		if (idx++ < *idx_skip)
			continue;
2185

2186
		ret = batadv_bla_claim_dump_entry(msg, portid, cb,
2187 2188
						  primary_if, claim);
		if (ret) {
2189 2190 2191 2192 2193
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

2194
	*idx_skip = 0;
2195
unlock:
2196
	spin_unlock_bh(&hash->list_locks[bucket]);
2197
	return ret;
2198 2199 2200
}

/**
 * batadv_bla_claim_dump() - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
						 hash, bucket, &idx))
			break;
		bucket++;
	}

	/* remember position for the next dump round */
	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
 *  seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			/* don't print our own backbone gateway */
			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			/* crc is updated concurrently - take a snapshot */
			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   batadv_print_vid(backbone_gw->vid), secs,
				   msecs, backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
#endif

/**
2329 2330
 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
 *  netlink socket
2331 2332
 * @msg: buffer for the message
 * @portid: netlink port
2333
 * @cb: Control block containing additional options
2334 2335 2336 2337 2338 2339
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */
static int
2340 2341
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
			       struct netlink_callback *cb,
2342 2343 2344 2345 2346 2347 2348 2349 2350 2351
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	int msecs;
	void *hdr;
	int ret = -EINVAL;

2352 2353 2354
	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_BLA_BACKBONE);
2355 2356 2357 2358 2359
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

2360 2361
	genl_dump_check_consistent(cb, hdr);

2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393
	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
2394 2395
 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
 *  a netlink socket
2396 2397
 * @msg: buffer for the message
 * @portid: netlink port
2398
 * @cb: Control block containing additional options
2399
 * @primary_if: primary interface
2400 2401
 * @hash: hash to dump
 * @bucket: bucket index to dump
2402 2403 2404 2405 2406
 * @idx_skip: How many entries to skip
 *
 * Return: always 0.
 */
static int
2407 2408
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
				struct netlink_callback *cb,
2409
				struct batadv_hard_iface *primary_if,
2410 2411
				struct batadv_hashtable *hash,
				unsigned int bucket, int *idx_skip)
2412 2413 2414
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;
2415
	int ret = 0;
2416

2417 2418 2419 2420
	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
2421 2422
		if (idx++ < *idx_skip)
			continue;
2423

2424
		ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
2425 2426
						     primary_if, backbone_gw);
		if (ret) {
2427 2428 2429 2430 2431
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

2432
	*idx_skip = 0;
2433
unlock:
2434
	spin_unlock_bh(&hash->list_locks[bucket]);
2435
	return ret;
2436 2437 2438
}

/**
 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.backbone_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
						    hash, bucket, &idx))
			break;
		bucket++;
	}

	/* remember position for the next dump round */
	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DAT
/**
 * batadv_bla_check_claim() - check if address is claimed
 *
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: mac address of which the claim status is checked
 * @vid: the VLAN ID
 *
 * addr is checked if this address is claimed by the local device itself.
 *
 * Return: true if bla is disabled or the mac is claimed by the device,
 * false if the device addr is already claimed by another gateway
 */
bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
			    u8 *addr, unsigned short vid)
{
	struct batadv_bla_claim search_claim;
	struct batadv_bla_claim *claim = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return ret;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return ret;

	/* First look if the mac address is claimed */
	ether_addr_copy(search_claim.addr, addr);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* If there is a claim and we are not owner of the claim,
	 * return false.
	 */
	if (claim) {
		if (!batadv_compare_eth(claim->backbone_gw->orig,
					primary_if->net_dev->dev_addr))
			ret = false;
		batadv_claim_put(claim);
	}

	batadv_hardif_put(primary_if);
	return ret;
}
#endif