/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <asm/unaligned.h>
#include "wme.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
#define mhwmp_dbg(fmt, args...) \
	printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...)   do { (void)(0); } while (0)
#endif

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2
/* Unknown Sequence Number */
#define MP_F_USN    0x01
/* Reason code Present */
#define MP_F_RCODE  0x02

static void mesh_queue_preq(struct mesh_path *, u8);

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le16(preq_elem + offset);
}

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))
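
/*
 * For reference, the offsets used above correspond to the following PREQ
 * element layout when the AE (Address Extension) flag is not set:
 *
 *   flags(1) hopcount(1) ttl(1) preq_id(4) orig_addr(6) orig_sn(4)
 *   lifetime(4) metric(4) target_count(1) target_flags(1)
 *   target_addr(6) target_sn(4)
 *
 * i.e. 37 octets in total, matching the ie_len used when PREQs are built
 * and checked below.  With AE set, an external address follows orig_sn
 * and the remaining offsets shift by ETH_ALEN.
 */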


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x)	(x + 3)
#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)

#define PERR_IE_TTL(x)		(*(x))
#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
#define PERR_IE_TARGET_ADDR(x)	(x + 3)
#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)

#define MSEC_TO_TU(x) (x*1000/1024)
#define SN_GT(x, y) ((long) (y) - (long) (x) < 0)
#define SN_LT(x, y) ((long) (x) - (long) (y) < 0)
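
/*
 * Illustrative examples: MSEC_TO_TU(1024) == 1000 since one TU is 1024
 * usec, and SN_GT(2, 1) expands to ((long) 1 - (long) 2 < 0), i.e. true.
 */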

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR,
	MPATH_RANN
};

static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
		__le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
		__le32 lifetime, __le32 metric, __le32 preq_id,
		struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg("sending PREQ to %pM", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg("sending PREP to %pM", target);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg("sending RANN from %pM", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			memcpy(pos, &preq_id, 4);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}
	memcpy(pos, &lifetime, 4);	/* interval for RANN */
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}


/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 * @sdata: local mesh subif
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 */
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
		       __le16 target_rcode, const u8 *ra,
		       struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/*
	 * flags bit, bit 1 is unset if we know the sequence number and
	 * bit 2 is set if we have a reason code
	 */
	*pos = 0;
	if (!target_sn)
		*pos |= MP_F_USN;
	if (target_rcode)
		*pos |= MP_F_RCODE;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &target_sn, 4);
	pos += 4;
	memcpy(pos, &target_rcode, 2);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}

void ieee80211s_update_metric(struct ieee80211_local *local,
		struct sta_info *stainfo, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int failed;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

	/* moving average, scaled to 100 */
	stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
	if (stainfo->fail_avg > 95)
		mesh_plink_broken(stainfo);
}
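
/*
 * Worked example for the fail_avg update above (illustrative only):
 * starting from fail_avg == 0, consecutive failed frames give
 * 20, 36, 48, 58, ... converging towards 100, while acked frames decay
 * the average back towards 0.  Once it exceeds 95 the peer link is
 * declared broken via mesh_plink_broken().
 */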

static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct ieee80211_supported_band *sband;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	if (sta->fail_avg >= 100)
		return MAX_METRIC;

	if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
		return MAX_METRIC;

	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need the rate in units of
	 * 1 Mbps. This is corrected in the tx_time computation below.
	 */
	rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
	return (u32)result;
}
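
/*
 * Rough numeric example (assuming a 54 Mbit/s legacy rate, i.e.
 * bitrate == 540 in units of 100 Kbps, and fail_avg == 0): tx_time is
 * about 39092 in the scaled units above, estimated_retx is 256 and the
 * returned airtime metric is roughly 152.  With fail_avg == 50,
 * estimated_retx doubles to 512 and the metric roughly doubles to ~305.
 */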

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the target in the
		 * Path Request. We diverge from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_SN_VALID)) {
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     new_metric >= mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* The draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, so skip it for now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath = NULL;
	u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl;
	u32 orig_sn, target_sn, lifetime;
	bool reply = false;
	bool forward = true;

	/* Update target SN, if present */
	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	target_sn = PREQ_IE_TARGET_SN(preq_elem);
	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
	target_flags = PREQ_IE_TARGET_F(preq_elem);

	mhwmp_dbg("received PREQ from %pM", orig_addr);

	if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
		mhwmp_dbg("PREQ is for us");
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifmsh->last_sn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_sn_update)) {
			target_sn = ++ifmsh->sn;
			ifmsh->last_sn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(target_addr, sdata);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
					SN_LT(mpath->sn, target_sn)) {
				mpath->sn = target_sn;
				mpath->flags |= MESH_PATH_SN_VALID;
			} else if ((!(target_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				target_sn = mpath->sn;
				if (target_flags & MP_F_RF)
					target_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.element_ttl;
		if (ttl != 0) {
			mhwmp_dbg("replying to the PREQ");
			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
				cpu_to_le32(orig_sn), 0, target_addr,
				cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, sdata);
		} else
			ifmsh->mshstats.dropped_frames_ttl++;
	}

	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		da = (mpath && mpath->is_root) ?
			mpath->rann_snd_addr : broadcast_addr;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_sn), target_flags, target_addr,
				cpu_to_le32(target_sn), da,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				sdata);
		ifmsh->mshstats.fwded_mcast++;
		ifmsh->mshstats.fwded_frames++;
	}
}


static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
	return rcu_dereference_protected(mpath->next_hop,
					 lockdep_is_held(&mpath->state_lock));
}


static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct mesh_path *mpath;
	u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;

	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
		/* destination, no forwarding required */
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(orig_addr, sdata);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_sn), 0, target_addr,
		cpu_to_le32(target_sn), next_hop, hopcount,
		ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl;
	u8 *ta, *target_addr;
	u32 target_sn;
	u16 target_rcode;

	ta = mgmt->sa;
	ttl = PERR_IE_TTL(perr_elem);
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
	target_sn = PERR_IE_TARGET_SN(perr_elem);
	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);
	if (mpath) {
		spin_lock_bh(&mpath->state_lock);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    memcmp(ta, next_hop_deref_protected(mpath)->sta.addr,
							ETH_ALEN) == 0 &&
		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
		    SN_GT(target_sn, mpath->sn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->sn = target_sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
					   cpu_to_le16(target_rcode),
					   broadcast_addr, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
				struct ieee80211_mgmt *mgmt,
				struct ieee80211_rann_ie *rann)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl, flags, hopcount;
	u8 *orig_addr;
	u32 orig_sn, metric;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	bool root_is_gate;

	ttl = rann->rann_ttl;
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	flags = rann->rann_flags;
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
	orig_addr = rann->rann_addr;
	orig_sn = rann->rann_seq;
	hopcount = rann->rann_hopcount;
	hopcount++;
	metric = rann->rann_metric;

	/*  Ignore our own RANNs */
	if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
		return;

	mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
			orig_addr, mgmt->sa, root_is_gate);

	rcu_read_lock();
	mpath = mesh_path_lookup(orig_addr, sdata);
	if (!mpath) {
		mesh_path_add(orig_addr, sdata);
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (!mpath) {
			rcu_read_unlock();
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			return;
		}
	}

	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
	     time_after(jiffies, mpath->exp_time - 1*HZ)) &&
	     !(mpath->flags & MESH_PATH_FIXED)) {
		mhwmp_dbg("time to refresh root mpath %pM", orig_addr);
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
	}

	if (mpath->sn < orig_sn) {
		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
				       cpu_to_le32(orig_sn),
				       0, NULL, 0, broadcast_addr,
				       hopcount, ttl, cpu_to_le32(interval),
				       cpu_to_le32(metric + mpath->metric),
				       0, sdata);
		mpath->sn = orig_sn;
	}

	/* Using individually addressed PREQ for root node */
	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
	mpath->is_root = true;

	if (root_is_gate)
		mesh_path_add_gate(mpath);

	rcu_read_unlock();
}


void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;
	struct sta_info *sta;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			len - baselen, &elems);

	if (elems.preq) {
		if (elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
						      MPATH_PREQ);
		if (last_hop_metric)
			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
						last_hop_metric);
	}
	if (elems.prep) {
		if (elems.prep_len != 31)
829 830
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
						      MPATH_PREP);
		if (last_hop_metric)
			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
						last_hop_metric);
	}
	if (elems.perr) {
		if (elems.perr_len != 15)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
	}
	if (elems.rann)
		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
	if (!preq_node) {
		mhwmp_dbg("could not allocate PREQ node");
		return;
	}

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			mhwmp_dbg("PREQ node queue full");
		return;
	}

	spin_lock(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
		spin_unlock(&mpath->state_lock);
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	mpath->flags |= MESH_PATH_REQ_QUEUED;
	spin_unlock(&mpath->state_lock);

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid a long wait if we did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, sdata);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags = MP_F_DO;
	else
		target_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
			cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
			cpu_to_le32(mpath->sn), da, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifmsh->preq_id++), sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/* mesh_nexthop_resolve - lookup next hop for given skb and start path
 * discovery if no forwarding information is found.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
 */
int mesh_nexthop_resolve(struct sk_buff *skb,
			 struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mesh_path *mpath;
	struct sk_buff *skb_to_free = NULL;
	u8 *target_addr = hdr->addr3;
	int err = 0;

	rcu_read_lock();
	err = mesh_nexthop_lookup(skb, sdata);
	if (!err)
		goto endlookup;

	/* no nexthop found, start resolving */
	mpath = mesh_path_lookup(target_addr, sdata);
	if (!mpath) {
		mesh_path_add(target_addr, sdata);
		mpath = mesh_path_lookup(target_addr, sdata);
		if (!mpath) {
			mesh_path_discard_frame(skb, sdata);
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (!(mpath->flags & MESH_PATH_RESOLVING))
		mesh_queue_preq(mpath, PREQ_Q_F_START);

	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
		skb_to_free = skb_dequeue(&mpath->frame_queue);

	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	skb_queue_tail(&mpath->frame_queue, skb);
	err = -ENOENT;
	if (skb_to_free)
		mesh_path_discard_frame(skb_to_free, sdata);

endlookup:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
 * this function is considered "using" the associated mpath, so preempt a path
 * refresh if this mpath expires soon.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise.
 */
int mesh_nexthop_lookup(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct sta_info *next_hop;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *target_addr = hdr->addr3;
	int err = -ENOENT;

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);

	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
		goto endlookup;

	if (time_after(jiffies,
		       mpath->exp_time -
		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
	    !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
	    !(mpath->flags & MESH_PATH_RESOLVING) &&
	    !(mpath->flags & MESH_PATH_FIXED))
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);

	next_hop = rcu_dereference(mpath->next_hop);
	if (next_hop) {
		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		err = 0;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

void mesh_path_timer(unsigned long data)
{
	struct mesh_path *mpath = (void *) data;
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	int ret;

	if (sdata->local->quiescing)
		return;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING))) {
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
		spin_unlock_bh(&mpath->state_lock);
	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
		spin_unlock_bh(&mpath->state_lock);
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		spin_unlock_bh(&mpath->state_lock);
		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
			ret = mesh_path_send_to_gates(mpath);
			if (ret)
				mhwmp_dbg("no gate was reachable");
		} else
			mesh_path_flush_pending(mpath);
	}
}

void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	u8 flags;

	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
			? RANN_FLAG_IS_GATE : 0;
	mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
			       cpu_to_le32(++ifmsh->sn),
			       0, NULL, 0, broadcast_addr,
			       0, sdata->u.mesh.mshcfg.element_ttl,
			       cpu_to_le32(interval), 0, 0, sdata);
}