/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

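/* schedule the next originator message (OGM) on the given hard interface
 * via the active routing algorithm; an interface pending activation is
 * switched to active first so that queued packets carry a valid
 * originator mac
 */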
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface(), where the originator mac is set, and
	 * outdated packets (especially with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

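/* free a forwarding packet: drop its skb (if any) and release the
 * reference held on the incoming hard interface
 */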
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

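/* queue a broadcast packet on the bcast list and arm the delayed work
 * that will send it once send_time (in jiffies) has elapsed
 */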
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

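/* delayed work callback: take the broadcast packet off the bcast list,
 * transmit one copy on each hard interface attached to this mesh and
 * re-queue it until it has been sent three times
 */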
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

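/* delayed work callback: unqueue the OGM and hand it to the routing
 * algorithm for emission; for our own OGMs the next transmission is
 * scheduled right away
 */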
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time, unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

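/* cancel all scheduled broadcast and OGM packets, or, if hard_iface is
 * given, only those queued for that interface
 */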
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if purge_outstanding_packets() was called with an argument,
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if purge_outstanding_packets() was called with an argument,
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}