/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

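/* let the bat algorithm schedule a new originator message (OGM) on this
 * interface, activating the interface first if its activation is still
 * pending
 */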
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * batadv_hardif_activate_interface() (where the originator mac is
	 * set) and outdated packets (especially uninitialized mac addresses)
	 * in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

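/* free a forwarding packet: release its skb and the reference held on the
 * incoming hard interface, then free the structure itself
 */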
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

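/* enqueue a broadcast packet on the bcast list and start the delayed work
 * that will (re)transmit it once send_time (in jiffies) has elapsed
 */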
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

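/* delayed work callback: rebroadcast a queued packet on all hard
 * interfaces belonging to its soft interface and requeue it until it has
 * been sent three times
 */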
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

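/* delayed work callback: emit a queued OGM via the bat algorithm and, if
 * it was one of our own, schedule the next OGM so the queue never runs
 * empty while the mesh is active
 */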
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

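/* cancel all broadcast and OGM packets still scheduled for transmission;
 * if hard_iface is given, only packets queued on that interface are
 * purged
 */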
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}