/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "packet.h"
#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"
/* first 4 bytes of the special BLA announce MAC; bytes 4-5 of an ANNOUNCE
 * frame's HW src carry the backbone CRC (see batadv_bla_send_announce() and
 * batadv_handle_announce())
 */
static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

/* forward declarations for functions referenced before their definition */
static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);
70

71
/**
72 73 74
 * batadv_choose_claim - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
75
 *
76
 * Return: the hash index of the claim
77
 */
78
static inline u32 batadv_choose_claim(const void *data, u32 size)
79
{
80
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
81
	u32 hash = 0;
82

83 84
	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
85 86 87 88

	return hash % size;
}

89
/**
90 91 92
 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
93
 *
94
 * Return: the hash index of the backbone gateway
95
 */
96
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
97
{
98
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
99
	u32 hash = 0;
100

101 102
	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
103 104 105 106

	return hash % size;
}

107 108 109 110 111
/**
 * batadv_compare_backbone_gw - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
112
 * Return: true if the backbones have the same data, false otherwise
113
 */
114 115
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
116
{
117
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
118
					 hash_entry);
119 120
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;
121

122
	if (!batadv_compare_eth(gw1->orig, gw2->orig))
123
		return false;
124 125

	if (gw1->vid != gw2->vid)
126
		return false;
127

128
	return true;
129 130
}

131
/**
132
 * batadv_compare_claim - compare address and vid of two claims
133 134 135
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claims
 *
136
 * Return: true if the claim have the same data, 0 otherwise
137
 */
138 139
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
140
{
141
	const void *data1 = container_of(node, struct batadv_bla_claim,
142
					 hash_entry);
143 144
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;
145 146

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
147
		return false;
148 149

	if (cl1->vid != cl2->vid)
150
		return false;
151

152
	return true;
153 154
}

155
/**
 * batadv_backbone_gw_release - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *gw;

	gw = container_of(ref, struct batadv_bla_backbone_gw, refcount);

	/* defer the free until after the RCU grace period; readers found via
	 * the hash may still be using the entry
	 */
	kfree_rcu(gw, rcu);
}

/**
 * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* release function runs when the last reference is dropped */
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}

180 181 182 183 184
/**
 * batadv_claim_release - release claim from lists and queue for free after rcu
 *  grace period
 * @ref: kref pointer of the claim
 */
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *old_backbone_gw;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	/* detach the backbone gw under backbone_lock so concurrent readers of
	 * claim->backbone_gw never see a pointer we are about to drop
	 */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	/* the backbone CRC is an xor over all claimed addresses; xor-ing the
	 * address again removes this claim's contribution
	 */
	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	/* drop the reference the claim held on its backbone gw */
	batadv_backbone_gw_put(old_backbone_gw);

	/* free only after the RCU grace period has elapsed */
	kfree_rcu(claim, rcu);
}

206
/**
 * batadv_claim_put - decrement the claim refcounter and possibly
 *  release it
 * @claim: claim to be free'd
 */
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	/* release function runs when the last reference is dropped */
	kref_put(&claim->refcount, batadv_claim_release);
}

216
/**
217
 * batadv_claim_hash_find - looks for a claim in the claim hash
218
 * @bat_priv: the bat priv with all the soft interface information
219 220
 * @data: search data (may be local/static data)
 *
221
 * Return: claim if found or NULL otherwise.
222
 */
223 224 225
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
226
{
227
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
228
	struct hlist_head *head;
229 230
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
231 232 233 234 235
	int index;

	if (!hash)
		return NULL;

236
	index = batadv_choose_claim(data, hash->size);
237 238 239
	head = &hash->table[index];

	rcu_read_lock();
240
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
241
		if (!batadv_compare_claim(&claim->hash_entry, data))
242 243
			continue;

244
		if (!kref_get_unless_zero(&claim->refcount))
245 246 247 248 249 250 251 252 253 254
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

255
/**
256
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
257
 * @bat_priv: the bat priv with all the soft interface information
258 259 260
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
261
 * Return: backbone gateway if found or NULL otherwise
262
 */
263
static struct batadv_bla_backbone_gw *
264 265
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
266
{
267
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
268
	struct hlist_head *head;
269 270
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
271 272 273 274 275
	int index;

	if (!hash)
		return NULL;

276
	ether_addr_copy(search_entry.orig, addr);
277 278
	search_entry.vid = vid;

279
	index = batadv_choose_backbone_gw(&search_entry, hash->size);
280 281 282
	head = &hash->table[index];

	rcu_read_lock();
283
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
284 285
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
286 287
			continue;

288
		if (!kref_get_unless_zero(&backbone_gw->refcount))
289 290 291 292 293 294 295 296 297 298
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

299 300 301 302
/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	/* walk every bucket and drop the hash's reference on each claim that
	 * belongs to this backbone gateway
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			/* putting before unlinking is safe: even if this was
			 * the last reference, the claim is freed via
			 * kfree_rcu(), so the memory stays valid until after
			 * the grace period
			 */
			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}

339 340 341
/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 *
 * BLA control traffic is encoded as gratuitous ARP replies; the claim type
 * and group id are carried in the ARP target HW address, and the payload
 * (claimed client, CRC, ...) in the Ethernet/ARP source fields depending on
 * the claim type. The frame is injected into the local soft interface as if
 * it had been received from the mesh.
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* copy the group-wide claim destination template and stamp in the
	 * requested claim type
	 */
	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC:  originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX   = claim type
			  * and YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	/* ARP sender HW address sits right behind the fixed ARP header */
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containg the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));

		break;
	}

	/* re-add the VLAN tag if the claim is for a tagged VLAN */
	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			goto out;
	}

	/* hand the frame to the local stack as if received on soft_iface */
	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
}

459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488
/**
 * batadv_bla_loopdetect_report - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 *
 * Consumes the backbone gw reference taken when the work item was queued.
 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    BATADV_PRINT_VID(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 BATADV_PRINT_VID(backbone_gw->vid));
	/* belt-and-braces termination; snprintf already NUL-terminates */
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}

489
/**
 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error. The
 * caller owns a reference on the returned entry.
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	/* fast path: hash_find already returns a referenced entry */
	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
	kref_init(&entry->refcount);

	/* extra reference for the hash table; the kref_init one is returned
	 * to the caller
	 */
	kref_get(&entry->refcount);
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

562 563 564 565 566 567 568
/**
 * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
569 570
 * where we receive other backbone gws
 */
571 572 573
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
574
				  unsigned short vid)
575
{
576
	struct batadv_bla_backbone_gw *backbone_gw;
577

578 579
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
580
						 vid, true);
581 582 583 584
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
585
	batadv_backbone_gw_put(backbone_gw);
586 587
}

588 589 590
/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	/* our own backbone gw entry for this VLAN */
	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	/* walk the whole claim hash and re-send every claim we own */
	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			/* NOTE(review): claim->backbone_gw is read here
			 * without taking claim->backbone_lock; confirm a
			 * stale pointer comparison is acceptable under RCU
			 */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}

637 638 639
/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		/* num_requests > 0 disables broadcast handling until the
		 * table is in sync again (decremented in handle_announce)
		 */
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

664
/**
 * batadv_bla_send_announce - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	/* build the special announce MAC: 43:05:43:05:CR:CC where the last
	 * two bytes carry the backbone CRC in network byte order
	 */
	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}

685 686 687
/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
688 689 690 691
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
692
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
693
				 const u8 *mac, const unsigned short vid,
694
				 struct batadv_bla_backbone_gw *backbone_gw)
695
{
696
	struct batadv_bla_backbone_gw *old_backbone_gw;
697 698
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
699
	bool remove_crc = false;
700 701
	int hash_added;

702
	ether_addr_copy(search_claim.addr, mac);
703
	search_claim.vid = vid;
704
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
705 706 707 708 709 710 711

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

712
		ether_addr_copy(claim->addr, mac);
713
		spin_lock_init(&claim->backbone_lock);
714 715
		claim->vid = vid;
		claim->lasttime = jiffies;
716
		kref_get(&backbone_gw->refcount);
717
		claim->backbone_gw = backbone_gw;
718
		kref_init(&claim->refcount);
719

720
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
721
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
722
			   mac, BATADV_PRINT_VID(vid));
723 724

		kref_get(&claim->refcount);
725
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
726 727 728
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);
729 730 731 732 733 734 735 736 737 738 739 740

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

741
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
742
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
743
			   mac, BATADV_PRINT_VID(vid));
744

745
		remove_crc = true;
746
	}
747 748 749 750

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
751
	kref_get(&backbone_gw->refcount);
752
	claim->backbone_gw = backbone_gw;
753
	spin_unlock_bh(&claim->backbone_lock);
754

755 756 757 758 759 760
	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}
761

762 763 764
	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
765
	spin_lock_bh(&backbone_gw->crc_lock);
766
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
767
	spin_unlock_bh(&backbone_gw->crc_lock);
768 769 770
	backbone_gw->lasttime = jiffies;

claim_free_ref:
771
	batadv_claim_put(claim);
772 773
}

774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793
/**
 * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* backbone_lock guarantees the pointer and the kref_get happen
	 * atomically with respect to ownership changes in bla_add_claim()
	 */
	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}

794 795 796 797 798
/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	/* two puts are intentional: the first drops the hash table's
	 * reference after the claim has been unlinked ...
	 */
	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_put(claim); /* reference from the hash is gone */

	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}

822 823
/**
 * batadv_handle_announce - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	/* only frames starting with the 43:05:43:05 magic are announces */
	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return false;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	/* bytes 4-5 of the announce MAC carry the sender's claim CRC */
	/* NOTE(review): an_addr[4] may not be 2-byte aligned — confirm this
	 * dereference is fine on the supported architectures
	 */
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_crc, crc);

		/* out of sync: ask the gw to repeat all of its claims */
		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

880 881
/**
 * batadv_handle_request - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return false;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return true;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	/* repeat all our claims and finish with an ANNOUNCE */
	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return true;
}

913 914
/**
 * batadv_handle_unclaim - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	/* unknown backbone gw: nothing to remove, but frame was handled */
	if (!backbone_gw)
		return true;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return true;
}

951 952
/**
 * batadv_handle_claim - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	/* NOTE(review): primary_if is dereferenced without a NULL check here,
	 * unlike batadv_handle_unclaim() — confirm callers guarantee it
	 */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

988
/**
 * batadv_check_claim_group - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if its on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	/* the BLA claim destination struct is embedded in the ARP hw_dst */
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		/* unknown claim type: not a claim packet at all */
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine.
	 * (both sides are __be16, so no byte-order conversion is needed
	 * for a pure equality test)
	 */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* dont accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves.
	 * comparison happens in host byte order so all nodes elect the
	 * same (numerically larger) group id deterministically.
	 */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}

1063
/**
 * batadv_bla_process_claim - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callee that it can use the frame on its own.
 */
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			/* copy into vhdr_buf in case the header is not
			 * linear in the skb
			 */
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return false;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return false; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return false;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return false;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return false;
	if (arphdr->ar_hln != ETH_ALEN)
		return false;
	if (arphdr->ar_pln != 4)
		return false;

	/* claim payload lives in the ARP sender/target hardware addresses:
	 * hw_src follows the fixed arphdr, hw_dst follows sender MAC + IPv4
	 */
	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return false;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return true;

	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
		return false;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	/* ret == 0: not a claim (pass through); ret == 1: foreign group
	 * claim (consume, return true)
	 */
	if (ret < 2)
		return !!ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return true;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return true;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return true;
}

/**
 * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		/* _safe iteration under the bucket lock: entries may be
		 * unlinked while walking
		 */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;

			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			/* drop the hash table's reference */
			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* takes a reference; released at skip below */
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
			if (now)
				goto purge_now;

			/* only expire claims owned by our own backbone;
			 * primary_if is non-NULL here since now == 0
			 */
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				goto skip;

			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				goto skip;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			/* handle_unclaim tolerates primary_if == NULL */
			batadv_handle_unclaim(bat_priv, primary_if,
					      backbone_gw->orig,
					      claim->addr, claim->vid);
skip:
			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_update_orig_address - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id: derived from the CRC16 of
	 * the new primary interface's MAC address
	 */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		/* no old interface to migrate from: wipe all state */
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387
/**
 * batadv_bla_send_loopdetect - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the mesh, this
 * indicates that there is a loop.
 */
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}

/**
 * batadv_bla_status_update - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary;

	primary = batadv_primary_if_get_selected(bat_priv);
	if (!primary)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary, primary);

	batadv_hardif_put(primary);
}

1408 1409 1410 1411 1412
/**
 * batadv_bla_periodic_work - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
1413 1414 1415
 *  * purge structures when they are too old
 *  * send announcements
 */
1416
static void batadv_bla_periodic_work(struct work_struct *work)
1417
{
1418
	struct delayed_work *delayed_work;
1419
	struct batadv_priv *bat_priv;
1420
	struct batadv_priv_bla *priv_bla;
1421
	struct hlist_head *head;
1422
	struct batadv_bla_backbone_gw *backbone_gw;
1423
	struct batadv_hashtable *hash;
1424
	struct batadv_hard_iface *primary_if;
1425
	bool send_loopdetect = false;
1426 1427
	int i;

G
Geliang Tang 已提交
1428
	delayed_work = to_delayed_work(work);
1429 1430
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1431
	primary_if = batadv_primary_if_get_selected(bat_priv);
1432 1433 1434
	if (!primary_if)
		goto out;

1435 1436
	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);
1437 1438 1439 1440

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456
	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
		random_ether_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

1457
	hash = bat_priv->bla.backbone_hash;
1458 1459 1460 1461 1462 1463 1464
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1465
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1466 1467
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
1468 1469 1470 1471
				continue;

			backbone_gw->lasttime = jiffies;

1472
			batadv_bla_send_announce(bat_priv, backbone_gw);
1473 1474 1475
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);
1476 1477 1478 1479 1480

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
1481 1482 1483
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
1484 1485 1486 1487 1488
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

1489 1490 1491
			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

1492 1493
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
1494 1495 1496 1497 1498
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
1499
		batadv_hardif_put(primary_if);
1500

1501 1502
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1503 1504
}

/* The hash for claim and backbone hash receive the same key because they
 * are getting initialized by hash_new with the same key. Reinitializing
 * them with two different keys to allow nested locking without generating
 * lockdep warnings
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
1513 1514 1515 1516 1517 1518
/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */
1519
int batadv_bla_init(struct batadv_priv *bat_priv)
1520
{
1521
	int i;
1522
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1523
	struct batadv_hard_iface *primary_if;
1524
	u16 crc;
1525
	unsigned long entrytime;
1526

1527 1528
	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

1529
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1530

1531
	/* setting claim destination address */
1532 1533
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
1534
	primary_if = batadv_primary_if_get_selected(bat_priv);
1535
	if (primary_if) {
1536 1537
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
1538
		batadv_hardif_put(primary_if);
1539
	} else {
1540
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
1541 1542
	}

1543
	/* initialize the duplicate list */
1544
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1545
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1546 1547
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;
1548

1549 1550 1551
	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

1552
	if (bat_priv->bla.claim_hash)
1553
		return 0;
1554

1555 1556
	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);
1557

1558
	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1559
		return -ENOMEM;
1560

1561
	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1562
				   &batadv_claim_hash_lock_class_key);
1563
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1564
				   &batadv_backbone_hash_lock_class_key);
1565

1566
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1567

1568 1569 1570 1571
	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1572
	return 0;
1573 1574
}

/**
 * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * NOTE(review): assumes the caller has already validated that skb->data
 * holds a complete batadv_bcast_packet header — confirm at call sites.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	int i, curr;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;
	bool ret = false;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc ... (over the payload after the bcast header) */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	/* walk the ring buffer from newest to oldest entry */
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* same originator: allow (might be intentional resend) */
		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return true to forbid it.
		 */
		ret = true;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, its the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}

1648
/**
1649 1650
 * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
 *  the VLAN identified by vid.
1651
 * @bat_priv: the bat priv with all the soft interface information
1652
 * @orig: originator mac address
1653
 * @vid: VLAN identifier
1654
 *
1655
 * Return: true if orig is a backbone for this vid, false otherwise.
1656
 */
1657
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1658
				    unsigned short vid)
1659
{
1660
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1661
	struct hlist_head *head;
1662
	struct batadv_bla_backbone_gw *backbone_gw;
1663 1664 1665
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1666
		return false;
1667 1668

	if (!hash)
1669
		return false;
1670 1671 1672 1673 1674

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
1675
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1676 1677
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
1678
				rcu_read_unlock();
1679
				return true;
1680 1681 1682 1683 1684
			}
		}
		rcu_read_unlock();
	}

1685
	return false;
1686 1687
}

1688
/**
1689
 * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
1690
 * @skb: the frame to be checked
1691 1692 1693
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
1694 1695
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
1696
 */
1697 1698
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
1699
{
1700
	struct batadv_bla_backbone_gw *backbone_gw;
1701
	unsigned short vid;
1702 1703

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1704
		return false;
1705 1706

	/* first, find out the vid. */
1707
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1708
		return false;
1709

1710
	vid = batadv_get_vid(skb, hdr_size);
1711 1712

	/* see if this originator is a backbone gw for this VLAN */
1713 1714
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
1715
	if (!backbone_gw)
1716
		return false;
1717

1718
	batadv_backbone_gw_put(backbone_gw);
1719
	return true;
1720 1721
}

1722
/**
1723
 * batadv_bla_free - free all bla structures
1724 1725 1726 1727
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */
1728
void batadv_bla_free(struct batadv_priv *bat_priv)
1729
{
1730
	struct batadv_hard_iface *primary_if;
1731

1732
	cancel_delayed_work_sync(&bat_priv->bla.work);
1733
	primary_if = batadv_primary_if_get_selected(bat_priv);
1734

1735
	if (bat_priv->bla.claim_hash) {
1736
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
1737 1738
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
1739
	}
1740
	if (bat_priv->bla.backbone_hash) {
1741
		batadv_bla_purge_backbone_gw(bat_priv, 1);
1742 1743
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
1744 1745
	}
	if (primary_if)
1746
		batadv_hardif_put(primary_if);
1747 1748
}

/**
 * batadv_bla_loopdetect_check - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us,
 * throw an uevent and log the event if that is the case.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;

	ethhdr = eth_hdr(skb);

	/* Only check for the MAC address and skip more checks here for
	 * performance reasons - this function is on the hotpath, after all.
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* If the packet came too late, don't forward it on the mesh
	 * but don't consider that as loop. It might be a coincidence.
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	/* register ourselves as backbone gw (own = true) so the loop can
	 * be reported against our own entry
	 */
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

	queue_work(batadv_event_workqueue, &backbone_gw->report_work);
	/* backbone_gw is unreferenced in the report work function */

	return true;
}

/**
 * batadv_bla_rx - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * batadv_bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, bool is_bcast)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* drop our own loop detection frames that came back via the mesh */
	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	/* look up a claim for the sender of this frame on this vlan */
	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim
		 * No claim exists yet, claim it for us!
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	/* skb is consumed on the handled path */
	kfree_skb(skb);
	ret = true;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}

1903
/**
1904
 * batadv_bla_tx - check packets going into the mesh
1905
 * @bat_priv: the bat priv with all the soft interface information
1906 1907 1908
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
1909
 * batadv_bla_tx checks if:
1910 1911 1912
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
1913
 * in these cases, the skb is further handled by this function.
1914 1915
 *
 * This call might reallocate skb data.
1916
 *
1917 1918
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
1919
 */
1920 1921
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
1922 1923
{
	struct ethhdr *ethhdr;
1924
	struct batadv_bla_claim search_claim, *claim = NULL;
1925
	struct batadv_bla_backbone_gw *backbone_gw;
1926
	struct batadv_hard_iface *primary_if;
1927
	bool client_roamed;
1928
	bool ret = false;
1929

1930
	primary_if = batadv_primary_if_get_selected(bat_priv);
1931 1932 1933 1934 1935 1936
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

1937
	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1938 1939
		goto handled;

1940
	ethhdr = eth_hdr(skb);
1941

1942
	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1943 1944 1945 1946
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

1947
	ether_addr_copy(search_claim.addr, ethhdr->h_source);
1948 1949
	search_claim.vid = vid;

1950
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
1951 1952 1953 1954 1955 1956

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
1957 1958 1959 1960 1961 1962
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
1963 1964 1965
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
1966 1967 1968
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
1985
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1986
	ret = false;
1987 1988
	goto out;
handled:
1989
	ret = true;
1990 1991
out:
	if (primary_if)
1992
		batadv_hardif_put(primary_if);
1993
	if (claim)
1994
		batadv_claim_put(claim);
1995 1996
	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq,
		 "   Client               VID      Originator        [o] (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* hold a backbone_gw reference - the claim may be
			 * re-pointed concurrently
			 */
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);

			/* crc is updated under crc_lock; snapshot it */
			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, BATADV_PRINT_VID(claim->vid),
				   backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   backbone_crc);

			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
#endif
2060

2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222
/**
 * batadv_bla_claim_dump_entry - dump one entry of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 when the bucket was dumped completely, a non-zero error code
 * (from batadv_bla_claim_dump_entry) when the message ran out of space.
 */
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
			     struct batadv_hard_iface *primary_if,
			     struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_claim *claim;
	int idx = 0;
	int ret = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		/* propagate the error: the caller must stop iterating
		 * buckets when the netlink message is full, otherwise the
		 * in-bucket offset stored in *idx_skip would wrongly be
		 * applied to the following buckets.
		 */
		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
						  primary_if, claim);
		if (ret) {
			/* remember how far we got within this bucket so the
			 * next dump round can resume right here
			 */
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	/* bucket done - the next bucket must start from its first entry */
	*idx_skip = 0;
unlock:
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_bla_claim_dump - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int peer_portid = NETLINK_CB(cb->skb).portid;
	int bucket_idx = cb->args[0];
	int entry_idx = cb->args[1];
	int ifindex;
	int ret = 0;

	/* the mesh interface to dump is passed as a netlink attribute */
	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	/* walk the hash buckets, resuming where the previous dump round
	 * stopped (position is carried across calls in cb->args)
	 */
	for (; bucket_idx < hash->size; bucket_idx++) {
		if (batadv_bla_claim_dump_bucket(msg, peer_portid,
						 cb->nlh->nlmsg_seq,
						 primary_if,
						 &hash->table[bucket_idx],
						 &entry_idx))
			break;
	}

	cb->args[0] = bucket_idx;
	cb->args[1] = entry_idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
 *  file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			/* our own backbone gateway is not listed */
			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			/* crc is updated under crc_lock; snapshot it */
			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   BATADV_PRINT_VID(backbone_gw->vid), secs,
				   msecs, backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
#endif

/**
 * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 crc_snapshot;
	bool own_entry;
	int lastseen_msecs;
	void *genl_hdr;

	genl_hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			       NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
	if (!genl_hdr)
		return -ENOBUFS;

	own_entry = batadv_compare_eth(backbone_gw->orig, primary_addr);

	/* crc is updated under crc_lock; snapshot it */
	spin_lock_bh(&backbone_gw->crc_lock);
	crc_snapshot = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	lastseen_msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	/* flag entries belonging to this node itself */
	if (own_entry && nla_put_flag(msg, BATADV_ATTR_BLA_OWN))
		goto nla_failure;

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC, crc_snapshot) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, lastseen_msecs))
		goto nla_failure;

	genlmsg_end(msg, genl_hdr);
	return 0;

nla_failure:
	genlmsg_cancel(msg, genl_hdr);
	return -EINVAL;
}

/**
 * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 when the bucket was dumped completely, a non-zero error code
 * (from batadv_bla_backbone_dump_entry) when the message ran out of space.
 */
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
				struct batadv_hard_iface *primary_if,
				struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;
	int ret = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		/* propagate the error: the caller must stop iterating
		 * buckets when the netlink message is full, otherwise the
		 * in-bucket offset stored in *idx_skip would wrongly be
		 * applied to the following buckets.
		 */
		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
						     primary_if, backbone_gw);
		if (ret) {
			/* remember how far we got within this bucket so the
			 * next dump round can resume right here
			 */
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	/* bucket done - the next bucket must start from its first entry */
	*idx_skip = 0;
unlock:
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_bla_backbone_dump - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int peer_portid = NETLINK_CB(cb->skb).portid;
	int bucket_idx = cb->args[0];
	int entry_idx = cb->args[1];
	int ifindex;
	int ret = 0;

	/* the mesh interface to dump is passed as a netlink attribute */
	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.backbone_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	/* walk the hash buckets, resuming where the previous dump round
	 * stopped (position is carried across calls in cb->args)
	 */
	for (; bucket_idx < hash->size; bucket_idx++) {
		if (batadv_bla_backbone_dump_bucket(msg, peer_portid,
						    cb->nlh->nlmsg_seq,
						    primary_if,
						    &hash->table[bucket_idx],
						    &entry_idx))
			break;
	}

	cb->args[0] = bucket_idx;
	cb->args[1] = entry_idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}