/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address using
 * the specified interface. The destination can be a unicast neighbor address
 * or the broadcast address.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* make room for the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

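/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise.
 */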
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

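/**
 * batadv_send_unicast_skb - send a packet to the given neighbor
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * If B.A.T.M.A.N. V is compiled in, the timestamp of the last unicast
 * transmission to this neighbor is additionally updated on success.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise.
 */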
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_unicast_skb(skb, neigh_node);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				       BATADV_P_DATA, orig_node, vid);
}

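/**
 * batadv_schedule_bat_ogm - schedule an originator message for transmission
 * @hard_iface: the interface to send the OGM on
 *
 * Marks an interface that was about to be activated as active and asks the
 * routing algorithm to schedule the next originator message on it.
 */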
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment the interface is activated in
	 * hardif_activate_interface(), where the originator mac is set, and
	 * outdated packets (especially with uninitialized mac addresses) still
	 * sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

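/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb, drops the references on the incoming and outgoing hard
 * interfaces and frees the structure itself.
 */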
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	kfree(forw_packet);
}

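/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet and arm its
 *  timer
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: number of jiffies to wait before (re)sending the packet
 */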
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_BUSY;
}

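/**
 * batadv_send_outstanding_bcast_packet - send a queued broadcast packet
 * @work: work queue item
 *
 * Rebroadcasts the packet on all interfaces attached to the mesh and requeues
 * it until the maximum number of rebroadcasts has been reached.
 */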
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

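/**
 * batadv_send_outstanding_bat_ogm_packet - send a queued OGM
 * @work: work queue item
 *
 * Emits the queued originator message via the routing algorithm and, if this
 * is the "original" copy for the outgoing interface, schedules the next OGM.
 */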
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

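/**
 * batadv_purge_outstanding_packets - stop and purge queued packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface whose queued packets shall be purged, or NULL to
 *  purge the queued packets of all interfaces
 */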
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->bcast_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->batman_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}