/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"
#include "multicast.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, uint8_t *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	uint8_t *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

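/**
 * batadv_schedule_bat_ogm - schedule an OGM on the given interface
 * @hard_iface: the interface the OGM is scheduled on
 *
 * Activates an interface that is still pending activation and then hands
 * the actual scheduling over to the bat_ogm_schedule() callback of the
 * routing algorithm in use.
 */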
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

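/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb (if any), drops the references held on the incoming and
 * outgoing hard interfaces and releases the structure itself.
 */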
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}

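/**
 * _batadv_add_bcast_packet_to_list - queue a prepared broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the pre-built forwarding packet to queue
 * @send_time: delay in jiffies after which the packet is handed to the
 *  workqueue for transmission
 */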
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

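	/* reserve one slot in the broadcast queue - bail out if it is full */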
	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

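/**
 * batadv_send_outstanding_bcast_packet - send a queued broadcast packet
 * @work: work queue item
 *
 * Sends one copy of the queued broadcast packet on every interface attached
 * to the soft interface and re-queues itself until the packet was
 * rebroadcast BATADV_NUM_BCASTS_MAX times.
 */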
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

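	/* do not flood the packet any further if the distributed ARP table
	 * decided it can be dropped (e.g. a locally answerable ARP request)
	 */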
	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

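		/* honor the per-interface rebroadcast limit */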
		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

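/**
 * batadv_send_outstanding_bat_ogm_packet - send a queued OGM
 * @work: work queue item
 *
 * Emits the queued OGM via the bat_ogm_emit() callback of the routing
 * algorithm in use and re-schedules the node's own OGM where needed.
 */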
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

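/**
 * batadv_purge_outstanding_packets - stop and free queued packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: limit the purge to this interface (NULL purges all)
 *
 * Cancels the pending delayed work of every queued broadcast and OGM packet
 * belonging to the given interface and frees the canceled packets.
 */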
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}