/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "wme.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
#define mhwmp_dbg(fmt, args...) \
	pr_debug("Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...)   do { (void)(0); } while (0)
#endif

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2
/* Unknown Sequence Number */
#define MP_F_USN    0x01
/* Reason code Present */
#define MP_F_RCODE  0x02

static void mesh_queue_preq(struct mesh_path *, u8);

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le16(preq_elem + offset);
}

/* HWMP IE processing macros */
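/* When the AE (Address Extension) flag is set, the IE carries an extra
 * 6-byte external address, which shifts every later field by ETH_ALEN;
 * u32_field_get()/u16_field_get() take the AE flag so the fixed offsets
 * below remain valid either way. */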
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x)	(x + 3)
#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)

#define PERR_IE_TTL(x)		(*(x))
#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
#define PERR_IE_TARGET_ADDR(x)	(x + 3)
#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)

#define MSEC_TO_TU(x) (x*1000/1024)
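/* Sequence numbers are compared in modulo-2^32 (serial number) arithmetic:
 * casting the difference to s32 keeps SN_GT/SN_LT correct across
 * wraparound, e.g. SN_GT(1, 0xfffffffe) is true. */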
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
#define root_path_confirmation_jiffies(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR,
	MPATH_RANN
};

static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
		__le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
		__le32 lifetime, __le32 metric, __le32 preq_id,
		struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg("sending PREQ to %pM", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg("sending PREP to %pM", target);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg("sending RANN from %pM", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			memcpy(pos, &preq_id, 4);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}
	memcpy(pos, &lifetime, 4);	/* interval for RANN */
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}


/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 * @sdata: local mesh subif
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 */
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
		       __le16 target_rcode, const u8 *ra,
		       struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/*
	 * flags bit, bit 1 is unset if we know the sequence number and
	 * bit 2 is set if we have a reason code
	 */
	*pos = 0;
	if (!target_sn)
		*pos |= MP_F_USN;
	if (target_rcode)
		*pos |= MP_F_RCODE;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &target_sn, 4);
	pos += 4;
	memcpy(pos, &target_rcode, 2);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}

void ieee80211s_update_metric(struct ieee80211_local *local,
		struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int failed;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

	/* moving average, scaled to 100 */
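	/* i.e. fail_avg = 0.8 * fail_avg + 20 * failed (with rounding), an
	 * exponentially weighted failure percentage; a peer that fails more
	 * than 95% of the time is reported as a broken peer link. */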
	sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
	if (sta->fail_avg > 95)
		mesh_plink_broken(sta);
}

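/* Airtime link metric (IEEE 802.11s), computed in fixed point with
 * ARITH_SHIFT fractional bits:
 *
 *	metric = (O + Bt / r) * 1 / (1 - ef)
 *
 * where O is a device-dependent channel access overhead (device_constant),
 * Bt is the test frame length, r is the current tx bitrate and ef is the
 * frame error rate estimated from fail_avg. */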
static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct rate_info rinfo;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	if (sta->fail_avg >= 100)
		return MAX_METRIC;

	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
	rate = cfg80211_calculate_bitrate(&rinfo);
	if (WARN_ON(!rate))
		return MAX_METRIC;

	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 * @action: type of frame the IE was found in (MPATH_PREQ or MPATH_PREP)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the target in the
		 * Path Request. We deviate from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_SN_VALID)) {
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     new_metric >= mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* the draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, so skip it for now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (ether_addr_equal(orig_addr, ta))
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath = NULL;
	u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl, flags;
	u32 orig_sn, target_sn, lifetime, orig_metric;
	bool reply = false;
	bool forward = true;
	bool root_is_gate;

	/* Update target SN, if present */
	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	target_sn = PREQ_IE_TARGET_SN(preq_elem);
	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
	target_flags = PREQ_IE_TARGET_F(preq_elem);
	orig_metric = metric;
	/* Proactive PREQ gate announcements */
	flags = PREQ_IE_FLAGS(preq_elem);
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);

	mhwmp_dbg("received PREQ from %pM", orig_addr);

	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
		mhwmp_dbg("PREQ is for us");
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifmsh->last_sn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_sn_update)) {
			target_sn = ++ifmsh->sn;
			ifmsh->last_sn_update = jiffies;
		}
	} else if (is_broadcast_ether_addr(target_addr) &&
		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
		rcu_read_lock();
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
				reply = true;
				target_addr = sdata->vif.addr;
				target_sn = ++ifmsh->sn;
				metric = 0;
				ifmsh->last_sn_update = jiffies;
			}
			if (root_is_gate)
				mesh_path_add_gate(mpath);
		}
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(target_addr, sdata);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
					SN_LT(mpath->sn, target_sn)) {
				mpath->sn = target_sn;
				mpath->flags |= MESH_PATH_SN_VALID;
			} else if ((!(target_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				target_sn = mpath->sn;
				if (target_flags & MP_F_RF)
					target_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.element_ttl;
		if (ttl != 0) {
			mhwmp_dbg("replying to the PREQ");
			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
				cpu_to_le32(orig_sn), 0, target_addr,
				cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, sdata);
		} else {
			ifmsh->mshstats.dropped_frames_ttl++;
		}
	}

	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
		u32 preq_id;
		u8 hopcount;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
		--ttl;
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		da = (mpath && mpath->is_root) ?
			mpath->rann_snd_addr : broadcast_addr;

		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
			target_sn = PREQ_IE_TARGET_SN(preq_elem);
			metric = orig_metric;
		}

		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_sn), target_flags, target_addr,
				cpu_to_le32(target_sn), da,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				sdata);
		if (!is_multicast_ether_addr(da))
			ifmsh->mshstats.fwded_unicast++;
		else
			ifmsh->mshstats.fwded_mcast++;
		ifmsh->mshstats.fwded_frames++;
	}
}


static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
	return rcu_dereference_protected(mpath->next_hop,
					 lockdep_is_held(&mpath->state_lock));
}


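/* Process a received PREP: if this STA is not the originator of the PREP,
 * decrement the TTL and forward it one hop along the path back towards the
 * PREQ originator. */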
static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;

	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		/* destination, no forwarding required */
		return;

	if (!ifmsh->mshcfg.dot11MeshForwarding)
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(orig_addr, sdata);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_sn), 0, target_addr,
		cpu_to_le32(target_sn), next_hop, hopcount,
		ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

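/* Process a received PERR: if it was sent by the active next hop towards the
 * reported target and carries a fresher SN (or the path SN is not yet valid),
 * invalidate the path and, if forwarding is enabled, propagate the PERR. */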
static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl;
	u8 *ta, *target_addr;
	u32 target_sn;
	u16 target_rcode;

	ta = mgmt->sa;
	ttl = PERR_IE_TTL(perr_elem);
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
	target_sn = PERR_IE_TARGET_SN(perr_elem);
	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);
	if (mpath) {
		struct sta_info *sta;

		spin_lock_bh(&mpath->state_lock);
		sta = next_hop_deref_protected(mpath);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    ether_addr_equal(ta, sta->sta.addr) &&
		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
		    SN_GT(target_sn, mpath->sn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->sn = target_sn;
			spin_unlock_bh(&mpath->state_lock);
			if (!ifmsh->mshcfg.dot11MeshForwarding)
				goto endperr;
			mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
					   cpu_to_le16(target_rcode),
					   broadcast_addr, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
endperr:
	rcu_read_unlock();
}

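/* Process a root announcement (RANN): create or refresh the path to the
 * root, schedule a PREQ towards it when it is due, remember the gate flag
 * and the RANN sender, and re-broadcast the RANN with updated metric and
 * hop count if forwarding is enabled. */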
static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
				struct ieee80211_mgmt *mgmt,
				struct ieee80211_rann_ie *rann)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct mesh_path *mpath;
	u8 ttl, flags, hopcount;
	u8 *orig_addr;
	u32 orig_sn, metric, metric_txsta, interval;
	bool root_is_gate;

	ttl = rann->rann_ttl;
	flags = rann->rann_flags;
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
	orig_addr = rann->rann_addr;
	orig_sn = le32_to_cpu(rann->rann_seq);
	interval = le32_to_cpu(rann->rann_interval);
	hopcount = rann->rann_hopcount;
	hopcount++;
	metric = le32_to_cpu(rann->rann_metric);

	/*  Ignore our own RANNs */
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		return;

	mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
			orig_addr, mgmt->sa, root_is_gate);

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	metric_txsta = airtime_link_metric_get(local, sta);

	mpath = mesh_path_lookup(orig_addr, sdata);
	if (!mpath) {
		mesh_path_add(orig_addr, sdata);
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (!mpath) {
			rcu_read_unlock();
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			return;
		}
	}

	if (!(SN_LT(mpath->sn, orig_sn)) &&
	    !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
		rcu_read_unlock();
		return;
	}

	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
	     (time_after(jiffies, mpath->last_preq_to_root +
				  root_path_confirmation_jiffies(sdata)) ||
	     time_before(jiffies, mpath->last_preq_to_root))) &&
	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
		mhwmp_dbg("time to refresh root mpath %pM", orig_addr);
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		mpath->last_preq_to_root = jiffies;
	}

	mpath->sn = orig_sn;
	mpath->rann_metric = metric + metric_txsta;
	mpath->is_root = true;
	/* Record the RANN sender's address to send individually
	 * addressed PREQs destined for the root mesh STA */
	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);

	if (root_is_gate)
		mesh_path_add_gate(mpath);

	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		rcu_read_unlock();
		return;
	}
	ttl--;

	if (ifmsh->mshcfg.dot11MeshForwarding) {
		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
				       cpu_to_le32(orig_sn),
				       0, NULL, 0, broadcast_addr,
				       hopcount, ttl, cpu_to_le32(interval),
				       cpu_to_le32(metric + metric_txsta),
				       0, sdata);
	}

	rcu_read_unlock();
}


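/* Entry point for received HWMP path selection action frames: check that the
 * sender is an established peer, parse the IEs and dispatch to the
 * PREQ/PREP/PERR/RANN handlers above. */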
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;
	struct sta_info *sta;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			len - baselen, &elems);

	if (elems.preq) {
		if (elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
						      MPATH_PREQ);
		if (last_hop_metric)
			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
						last_hop_metric);
	}
	if (elems.prep) {
		if (elems.prep_len != 31)
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
						      MPATH_PREP);
		if (last_hop_metric)
			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
						last_hop_metric);
	}
	if (elems.perr) {
		if (elems.perr_len != 15)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
	}
	if (elems.rann)
		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
	if (!preq_node) {
		mhwmp_dbg("could not allocate PREQ node");
		return;
	}

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			mhwmp_dbg("PREQ node queue full");
		return;
	}

	spin_lock(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
		spin_unlock(&mpath->state_lock);
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	mpath->flags |= MESH_PATH_REQ_QUEUED;
	spin_unlock(&mpath->state_lock);

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, sdata);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags = MP_F_DO;
	else
		target_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
			cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
			cpu_to_le32(mpath->sn), da, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifmsh->preq_id++), sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/* mesh_nexthop_resolve - lookup next hop for given skb and start path
 * discovery if no forwarding information is found.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
 */
int mesh_nexthop_resolve(struct sk_buff *skb,
			 struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mesh_path *mpath;
	struct sk_buff *skb_to_free = NULL;
	u8 *target_addr = hdr->addr3;
	int err = 0;

	rcu_read_lock();
	err = mesh_nexthop_lookup(skb, sdata);
	if (!err)
		goto endlookup;

	/* no nexthop found, start resolving */
	mpath = mesh_path_lookup(target_addr, sdata);
	if (!mpath) {
		mesh_path_add(target_addr, sdata);
		mpath = mesh_path_lookup(target_addr, sdata);
		if (!mpath) {
			mesh_path_discard_frame(skb, sdata);
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (!(mpath->flags & MESH_PATH_RESOLVING))
		mesh_queue_preq(mpath, PREQ_Q_F_START);

	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
		skb_to_free = skb_dequeue(&mpath->frame_queue);

	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	skb_queue_tail(&mpath->frame_queue, skb);
	err = -ENOENT;
	if (skb_to_free)
		mesh_path_discard_frame(skb_to_free, sdata);

endlookup:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
 * this function is considered "using" the associated mpath, so preempt a path
 * refresh if this mpath expires soon.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise.
 */
int mesh_nexthop_lookup(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct sta_info *next_hop;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *target_addr = hdr->addr3;
	int err = -ENOENT;

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);

	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
		goto endlookup;

	if (time_after(jiffies,
		       mpath->exp_time -
		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
	    ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
	    !(mpath->flags & MESH_PATH_RESOLVING) &&
	    !(mpath->flags & MESH_PATH_FIXED))
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);

	next_hop = rcu_dereference(mpath->next_hop);
	if (next_hop) {
		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		err = 0;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

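/* Path discovery timer: while retries remain, re-queue the PREQ with an
 * exponentially increasing timeout; otherwise give up on the path and either
 * hand the queued frames to a mesh gate or flush them. */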
void mesh_path_timer(unsigned long data)
{
	struct mesh_path *mpath = (void *) data;
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	int ret;

	if (sdata->local->quiescing)
		return;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING))) {
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
		spin_unlock_bh(&mpath->state_lock);
	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
		spin_unlock_bh(&mpath->state_lock);
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		spin_unlock_bh(&mpath->state_lock);
		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
			ret = mesh_path_send_to_gates(mpath);
			if (ret)
				mhwmp_dbg("no gate was reachable");
		} else
			mesh_path_flush_pending(mpath);
	}
}

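/* Proactive root frame transmission: depending on dot11MeshHWMPRootMode,
 * periodically send either a RANN or a proactive (broadcast target) PREQ. */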
void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	u8 flags, target_flags = 0;

	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
			? RANN_FLAG_IS_GATE : 0;

	switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
	case IEEE80211_PROACTIVE_RANN:
		mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
			       cpu_to_le32(++ifmsh->sn),
			       0, NULL, 0, broadcast_addr,
			       0, ifmsh->mshcfg.element_ttl,
			       cpu_to_le32(interval), 0, 0, sdata);
		break;
	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
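		/* fall through to the proactive PREQ case */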
	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
		target_flags |= IEEE80211_PREQ_TO_FLAG |
				IEEE80211_PREQ_USN_FLAG;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
				cpu_to_le32(++ifmsh->sn), target_flags,
				(u8 *) broadcast_addr, 0, broadcast_addr,
				0, ifmsh->mshcfg.element_ttl,
				cpu_to_le32(interval),
				0, cpu_to_le32(ifmsh->preq_id++), sdata);
		break;
	default:
		mhwmp_dbg("Proactive mechanism not supported");
		return;
	}
}