/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "vis.h"
#include "unicast.h"
#include "bridge_loop_avoidance.h"

static int route_unicast_packet(struct sk_buff *skb,
				struct hard_iface *recv_if);

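/* shift the own OGM broadcast window of hard_iface one sequence number
 * ahead and refresh the per-originator echo counters derived from it
 */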
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	uint32_t i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

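/* switch the best route towards orig_node to neigh_node (or delete the
 * route if neigh_node is NULL) and fix up the involved refcounts
 */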
static void _update_route(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM (now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}

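/* set neigh_node as the new router towards orig_node, but only if it
 * differs from the router currently in use
 */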
void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  struct neigh_node *neigh_node)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		_update_route(bat_priv, orig_node, neigh_node);

out:
	if (router)
		neigh_node_free_ref(router);
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

void bonding_candidate_add(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ...  */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as a candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
void bonding_save_primary(const struct orig_node *orig_node,
			  struct orig_node *orig_neigh_node,
			  const struct batman_ogm_packet *batman_ogm_packet)
{
	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
		     unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
	    (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
			return 1;

		*last_reset = jiffies;
		bat_dbg(DBG_BATMAN, bat_priv,
			"old packet received, start protection\n");
	}

	return 0;
}

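/* basic sanity checks for a received management frame: minimum length,
 * broadcast recipient, unicast sender; also make the skb writable and linear
 */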
bool check_management_packet(struct sk_buff *skb,
			     struct hard_iface *hard_iface,
			     int header_len)
{
	struct ethhdr *ethhdr;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, header_len)))
		return false;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return false;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return false;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return false;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return false;

	return true;
}

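/* icmp packet addressed to this node: hand everything but echo requests to
 * the userspace socket, otherwise turn the request around as an echo reply
 */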
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

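/* an icmp echo request ran out of hops before reaching its destination:
 * answer with a TTL_EXCEEDED message towards the originator
 */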
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
			 icmp_packet->orig, icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}


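/* receive handler for icmp packets: append record route data if requested,
 * deliver packets addressed to us and forward all others along the mesh
 */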
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->header.ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->header.ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}

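/* receive handler for translation table queries: answer or forward
 * TT_REQUESTs and process or route TT_RESPONSEs
 */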
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	uint16_t tt_size;
	struct ethhdr *ethhdr;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_RX);

		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_RX);

		if (is_my_mac(tt_query->dst)) {
			/* packet needs to be linearized to access the TT
			 * changes */
			if (skb_linearize(skb) < 0)
				goto out;
			/* skb_linearize() possibly changed skb->data */
			tt_query = (struct tt_query_packet *)skb->data;

			tt_size = tt_len(ntohs(tt_query->tt_data));

			/* Ensure we have all the claimed data */
			if (unlikely(skb_headlen(skb) <
				     sizeof(struct tt_query_packet) + tt_size))
				goto out;

			handle_tt_response(bat_priv, tt_query);
		} else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

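/* receive handler for roaming advertisements: register the roaming client
 * with the originator that announced it and start the roaming phase
 */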
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_RX);

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	/* check if it is a backbone gateway. we don't accept
	 * roaming advertisement from it, as it has the same
	 * entries as we have.
	 */
	if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
		goto out;

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Received ROAMING_ADV from %pM (client %pM)\n",
		roam_adv_packet->src, roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true, false);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes in between should choose a candidate which
	 * is not on the interface where the packet came in. */

	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}

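/* basic sanity checks for a received unicast packet: minimum length, sane
 * source/destination addresses and whether this node is the recipient
 */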
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

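/* forward a unicast packet towards its destination, fragmenting or
 * reassembling it along the way if the next hop's MTU requires it
 */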
static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->header.ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto out;

	/* find_router() increases neigh_nodes refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);

	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->header.packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->header.ttl--;

	/* Update stats counter */
	batadv_inc_counter(bat_priv, BAT_CNT_FORWARD);
	batadv_add_counter(bat_priv, BAT_CNT_FORWARD_BYTES,
			   skb->len + ETH_HLEN);

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

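/* compare the packet's translation table version number with the one known
 * for its destination and reroute the packet if it has become stale;
 * returns 0 if the packet has to be dropped, 1 otherwise
 */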
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			       struct sk_buff *skb)
{
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* check if there is enough data before accessing it */
		if (!pskb_may_pull(skb, sizeof(struct unicast_packet) +
				   ETH_HLEN))
			return 0;

		ethhdr = (struct ethhdr *)(skb->data +
			sizeof(struct unicast_packet));

		/* we don't have an updated route for this client, so we should
		 * not try to reroute the packet!!
		 */
		if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
			return 1;

		orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);

		if (!orig_node) {
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;
			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;
			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv,
			"TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
			unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
			unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}
	return 1;
}

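/* receive handler for unicast packets: deliver packets addressed to this
 * node to the soft-interface and route all others
 */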
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

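/* receive handler for unicast fragments: reassemble fragments addressed to
 * this node and route all others
 */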
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
995
	int hdr_size = sizeof(*unicast_packet);
996 997 998 999 1000 1001
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}


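/* receive handler for broadcast packets: drop duplicates and out of window
 * packets, re-broadcast the rest and hand a copy up to the soft-interface
 */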
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->header.ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto out;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			 ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* check whether this has been sent by another originator before */
	if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
		goto out;

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* don't hand the broadcast up if it is from an originator
	 * from the same backbone.
	 */
	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto out;

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

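/* receive handler for vis packets: feed server sync and client update
 * messages into the local vis database
 */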
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(*vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	   always free the skbuff. */
	return NET_RX_DROP;
}