routing.c
/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "vis.h"
#include "unicast.h"
#include "bridge_loop_avoidance.h"

static int route_unicast_packet(struct sk_buff *skb,
				struct hard_iface *recv_if);

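/* slide the own broadcast window of every originator on the given interface
 * by one position and update the count of own broadcasts that were echoed
 * back (bcast_own_sum) */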
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	uint32_t i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

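/* switch the best next hop (router) towards an originator to the given
 * neighbor, adjusting refcounts and translation table entries as needed */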
static void _update_route(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM (now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}

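/* update the route towards an originator, but only touch it if the best
 * next hop actually changed */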
void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  struct neigh_node *neigh_node)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		_update_route(bat_priv, orig_node, neigh_node);

out:
	if (router)
		neigh_node_free_ref(router);
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

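/* try to add the given neighbor to the bonding candidate list of the
 * originator: it must share the originator's primary address, have a tq
 * value close enough to the currently selected router and must not
 * interfere with an already selected candidate */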
void bonding_candidate_add(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ...  */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		* considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
void bonding_save_primary(const struct orig_node *orig_node,
			  struct orig_node *orig_neigh_node,
			  const struct batman_ogm_packet *batman_ogm_packet)
{
	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
		     unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
	    (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else {
			return 1;
		}
	}
	return 0;
}

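/* perform basic sanity checks on a received management (OGM) frame and make
 * sure the skb is writable and linear before it is processed further */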
bool check_management_packet(struct sk_buff *skb,
			     struct hard_iface *hard_iface,
			     int header_len)
{
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, header_len)))
		return false;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return false;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return false;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return false;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return false;

	return true;
}

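/* handle an icmp packet destined for this node: hand it to the userspace
 * socket queue or, for echo requests, send an echo reply back towards the
 * originator */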
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

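/* answer an icmp echo request whose ttl expired on this node with a
 * TTL_EXCEEDED message sent back towards the originator */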
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
			 icmp_packet->orig, icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}


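/* receive handler for batman icmp packets: sanity checks, record route
 * handling, local delivery or forwarding towards the destination */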
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->header.ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->header.ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}

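/* receive handler for translation table queries (TT_REQUEST/TT_RESPONSE):
 * answer or process them locally if possible, otherwise route them towards
 * their destination */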
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	uint16_t tt_len;
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	tt_query->tt_data = ntohs(tt_query->tt_data);

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		if (is_my_mac(tt_query->dst)) {
			/* packet needs to be linearized to access the TT
			 * changes */
			if (skb_linearize(skb) < 0)
				goto out;

			tt_len = tt_query->tt_data * sizeof(struct tt_change);

			/* Ensure we have all the claimed data */
			if (unlikely(skb_headlen(skb) <
				     sizeof(struct tt_query_packet) + tt_len))
				goto out;

			handle_tt_response(bat_priv, tt_query);
		} else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

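/* receive handler for roaming advertisements: add the roamed client to the
 * global translation table of the announcing originator or route the packet
 * towards its destination */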
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	/* check if it is a backbone gateway. we don't accept
	 * roaming advertisement from it, as it has the same
	 * entries as we have.
	 */
	if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
		goto out;

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Received ROAMING_ADV from %pM (client %pM)\n",
		roam_adv_packet->src, roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true, false);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbors
 * refcount.*/
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */

	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}

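/* perform basic sanity checks on a received unicast packet */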
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

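/* forward a unicast packet towards its destination, fragmenting or
 * reassembling it on the way if necessary */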
static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->header.ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto out;

	/* find_router() increases neigh_nodes refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);

	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->header.packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->header.ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

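/* compare the translation table version number carried in the packet with
 * the current one and re-route the packet to the right originator if the
 * destination's tables have changed in the meantime */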
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			      struct sk_buff *skb)
{
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* check if there is enough data before accessing it */
		if (!pskb_may_pull(skb, sizeof(struct unicast_packet) +
				   ETH_HLEN))
			return 0;

		ethhdr = (struct ethhdr *)(skb->data +
			sizeof(struct unicast_packet));
		orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);

		if (!orig_node) {
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;
			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;
			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv,
			"TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
			unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
			unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}
	return 1;
}

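/* receive handler for unicast packets: deliver them locally or forward them
 * towards their destination */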
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

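/* receive handler for fragmented unicast packets: reassemble fragments
 * destined for this node or forward them towards their destination */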
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}


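/* receive handler for broadcast packets: drop duplicates and old packets,
 * queue the packet for rebroadcast and hand it up to the soft-interface */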
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->header.ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto out;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			 ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* check whether this has been sent by another originator before */
	if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
		goto out;

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* don't hand the broadcast up if it is from an originator
	 * from the same backbone.
	 */
	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto out;

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

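/* receive handler for vis packets: hand server sync and client update
 * packets destined for this node over to the vis code */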
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(*vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skbuff. */
	return NET_RX_DROP;
}