/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "network-coding.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error.	 However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns TRUE on success; FALSE otherwise.
 */
bool batadv_send_skb_to_orig(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		return false;

	/* route it */
	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

	batadv_neigh_node_free_ref(neigh_node);

	return true;
}

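/**
 * batadv_schedule_bat_ogm - schedule an originator message for transmission
 * @hard_iface: outgoing interface the OGM is to be sent on
 *
 * Marks an interface pending activation as active and hands OGM scheduling
 * over to the routing algorithm in use.
 */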
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

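/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Drops the attached skb (if any), releases the reference held on the
 * incoming hard interface and frees the structure itself.
 */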
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

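/**
 * _batadv_add_bcast_packet_to_list - queue a prepared broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the broadcast packet to queue
 * @send_time: delay (in jiffies) before the delayed work is run
 *
 * Adds the packet to the broadcast forward list and arms the delayed work
 * that will transmit it.
 */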
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

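/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: work queue item belonging to the queued broadcast packet
 *
 * Sends a copy of the queued broadcast packet on every hard interface
 * attached to the soft interface (up to the per-interface num_bcasts limit)
 * and re-queues it until it has been rebroadcast BATADV_NUM_BCASTS_MAX times.
 */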
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

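/**
 * batadv_send_outstanding_bat_ogm_packet - transmit a queued OGM
 * @work: work queue item belonging to the queued OGM packet
 *
 * Removes the packet from the OGM forward list, emits it via the routing
 * algorithm in use and, for packets originated by this node, schedules the
 * next OGM on the same interface.
 */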
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

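/**
 * batadv_purge_outstanding_packets - cancel queued broadcast and OGM packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: restrict the purge to this interface, or NULL for all
 *
 * Cancels the delayed work of every matching packet in the broadcast and
 * OGM forward lists and frees the packets that had not been sent yet.
 */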
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* batadv_send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}