// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2020  B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet() - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address via
 * the specified interface. If dst_addr is the broadcast address, the packet
 * is broadcast on hard_iface, otherwise it is sent as unicast to that
 * address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due to
 * congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	ret = dev_queue_xmit(skb);
	return net_xmit_eval(ret);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
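
/* Usage sketch (editor's illustration, not part of the original file):
 * batadv_send_skb_packet() always takes ownership of the skb, so the caller
 * must not free or reuse it afterwards, whatever the return value. "iface"
 * and "peer_addr" below are hypothetical caller-side names.
 *
 *	ret = batadv_send_skb_packet(skb, iface, peer_addr);
 *	if (ret < 0)
 *		pr_debug("batman-adv: xmit failed: %d\n", ret);
 *	skb = NULL;	// consumed either way - no kfree_skb() here
 */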

/**
 * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
 * @skb: packet to be transmitted (with batadv header and no outer eth header)
 * @hard_iface: outgoing interface
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

/**
 * batadv_send_unicast_skb() - Send unicast packet to neighbor
 * @skb: packet to be transmitted (with batadv header and no outer eth header)
 * @neigh: neighbor which is used as next hop to destination
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
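
/* Usage sketch (editor's illustration, not part of the original file): a
 * forwarding call site would typically treat -EINPROGRESS (skb buffered by
 * the network coding layer) like a successful transmit. The skb is consumed
 * in every case; orig_node is assumed to be held by the caller.
 *
 *	ret = batadv_send_skb_to_orig(skb, orig_node, recv_if);
 *	skb = NULL;	// consumed
 *	if (ret == -EINPROGRESS || ret == NET_XMIT_SUCCESS)
 *		return NET_RX_SUCCESS;
 *	return NET_RX_DROP;
 */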

/**
 * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
	/* skb was consumed */
	skb = NULL;

out:
	kfree_skb(skb);
	return ret;
}
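
/* Usage sketch (editor's illustration, not part of the original file):
 * batadv_send_skb_via_gw() below is a canonical caller - it resolves an
 * orig_node first and drops its own reference afterwards, because
 * batadv_send_skb_unicast() consumes the skb but not the orig_node:
 *
 *	orig_node = batadv_gw_get_selected_orig(bat_priv);
 *	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
 *				      BATADV_P_DATA, orig_node, vid);
 *	if (orig_node)
 *		batadv_orig_node_put(orig_node);
 */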

/**
 * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_send_skb_via_gw() - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_forw_packet_free() - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc() - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 * @skb: The raw packet this forwarding packet shall contain
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv,
			 struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = skb;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}
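
/* Lifecycle sketch (editor's illustration, not part of the original file):
 * batadv_add_bcast_packet_to_list() below shows the intended pattern -
 * allocate, initialize the delayed work, then hand the packet over to a
 * queueing helper. If the allocation fails, the skb is still owned by the
 * caller and must be freed there.
 *
 *	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
 *					       &bat_priv->bcast_queue_left,
 *					       bat_priv, newskb);
 *	if (!forw_packet)
 *		goto err_packet_free;
 *	INIT_DELAYED_WORK(&forw_packet->delayed_work,
 *			  batadv_send_outstanding_bcast_packet);
 *	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
 */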

/**
 * batadv_forw_packet_was_stolen() - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}

/**
 * batadv_forw_packet_steal() - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}
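
/* Usage sketch (editor's illustration, not part of the original file): a
 * packet worker that is done with its forw_packet must win the race against
 * the purge routine before freeing it, just as
 * batadv_send_outstanding_bcast_packet() does at its "out:" label:
 *
 *	if (batadv_forw_packet_steal(forw_packet,
 *				     &bat_priv->forw_bcast_list_lock))
 *		batadv_forw_packet_free(forw_packet, dropped);
 */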

/**
 * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
 * @forw_list: the to be stolen forward packets
 * @cleanup_list: a backup pointer, to be able to dispose the packet later
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL forwarding packets on all hard
 * interfaces will be claimed.
 *
 * The packets are being moved from the forw_list to the cleanup_list. This
 * makes it possible for already running threads to notice the claim.
 */
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}

/**
 * batadv_forw_packet_list_free() - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees this forward packet.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}

/**
 * batadv_forw_packet_queue() - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the corresponding interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 *
 * Calling batadv_forw_packet_queue() after a call to
 * batadv_forw_packet_steal() is forbidden!
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without trouble, then
		 * don't get back into the queue after stealing...
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}

/**
 * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}

/**
 * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}

/**
 * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay,
				    bool own_packet)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb) {
		batadv_hardif_put(primary_if);
		goto err;
	}

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv, newskb);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
	return NETDEV_TX_OK;

err_packet_free:
	kfree_skb(newskb);
err:
	return NETDEV_TX_BUSY;
}
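
/* Usage sketch (editor's illustration, not part of the original file):
 * unlike the send helpers above, this function works on a copy of the skb,
 * so the caller keeps ownership and still has to release its own reference:
 *
 *	ret = batadv_add_bcast_packet_to_list(bat_priv, skb, delay, true);
 *	consume_skb(skb);	// the queue holds its own copy
 *	return ret;
 */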

/**
 * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
 * @forw_packet: the forwarding packet to check
 * @hard_iface: the interface to check on
 *
 * Checks whether a given packet has any (re)transmissions left on the provided
 * interface.
 *
 * hard_iface may be NULL: In that case the number of transmissions this skb had
 * so far is compared with the maximum amount of retransmissions independent of
 * any interface instead.
 *
 * Return: True if (re)transmissions are left, false otherwise.
 */
static bool
batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
			       struct batadv_hard_iface *hard_iface)
{
	unsigned int max;

	if (hard_iface)
		max = hard_iface->num_bcasts;
	else
		max = BATADV_NUM_BCASTS_MAX;

	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
}

/**
 * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
 *  packet
 * @forw_packet: the packet to increase the counter for
 */
static void
batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
{
	BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
}

/**
 * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
 * @forw_packet: the packet to check
 *
 * Return: True if this packet was transmitted before, false otherwise.
 */
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
}

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hardif_neigh_node *neigh_node;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	bool dropped = false;
	u8 *neigh_addr;
	u8 *orig_neigh;
	int ret = 0;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface))
			continue;

		if (forw_packet->own) {
			neigh_node = NULL;
		} else {
			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
			neigh_node = batadv_hardif_neigh_get(hard_iface,
							     neigh_addr);
		}

		orig_neigh = neigh_node ? neigh_node->orig : NULL;

		ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
						 orig_neigh);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
				   "BCAST packet from orig %pM on %s suppressed: %s\n",
				   bcast_packet->orig,
				   hard_iface->net_dev->name, type);

			if (neigh_node)
				batadv_hardif_neigh_put(neigh_node);

			continue;
		}

		if (neigh_node)
			batadv_hardif_neigh_put(neigh_node);

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	batadv_forw_packet_bcasts_inc(forw_packet);

	/* if we still have some more bcasts to send */
	if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}

/**
 * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s(): %s\n",
			   __func__, hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s()\n", __func__);

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}