/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "wme.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
#define mhwmp_dbg(fmt, args...) \
	printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...)   do { (void)(0); } while (0)
#endif

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2
/* Unknown Sequence Number */
#define MP_F_USN    0x01
/* Reason code Present */
#define MP_F_RCODE  0x02

static void mesh_queue_preq(struct mesh_path *, u8);

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le16(preq_elem + offset);
}

/* HWMP IE processing macros */
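/* Field offsets below follow the PREQ and PREP element layouts; when the
 * AE (Address Extension) flag is set in the flags octet, an extra 6-byte
 * address precedes the later fields, which is why the accessors pass
 * AE_F_SET(x) into the field getters.
 */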
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x)	(x + 3)
#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)

#define PERR_IE_TTL(x)		(*(x))
#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
#define PERR_IE_TARGET_ADDR(x)	(x + 3)
#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)

#define MSEC_TO_TU(x) (x*1000/1024)
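/* Sequence numbers are compared circularly (mod 2^32): the unsigned
 * difference is reinterpreted as signed, so e.g. SN_GT(1, 0xffffffff)
 * holds across a wraparound.
 */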
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR,
	MPATH_RANN
};

static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
		__le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
		__le32 lifetime, __le32 metric, __le32 preq_id,
		struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg("sending PREQ to %pM", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg("sending PREP to %pM", target);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg("sending RANN from %pM", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
		break;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			memcpy(pos, &preq_id, 4);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}
	memcpy(pos, &lifetime, 4);	/* interval for RANN */
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}


/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 */
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
		       __le16 target_rcode, const u8 *ra,
		       struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/*
	 * flags: the USN bit is set when the target sequence number is
	 * unknown, and the RCODE bit is set when a reason code is present
	 */
	*pos = 0;
	if (!target_sn)
		*pos |= MP_F_USN;
	if (target_rcode)
		*pos |= MP_F_RCODE;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &target_sn, 4);
	pos += 4;
	memcpy(pos, &target_rcode, 2);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}

void ieee80211s_update_metric(struct ieee80211_local *local,
		struct sta_info *stainfo, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int failed;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

	/* moving average, scaled to 100 */
	stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
	if (stainfo->fail_avg > 95)
		mesh_plink_broken(stainfo);
}

static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct ieee80211_supported_band *sband;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	if (sta->fail_avg >= 100)
		return MAX_METRIC;

	if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)
		return MAX_METRIC;

	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	rate = sband->bitrates[sta->last_tx_rate.idx].bitrate;
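	/* Airtime link metric (802.11s): roughly (O + Bt/r) / (1 - ef), where
	 * O is the channel access overhead (device_constant), Bt the test
	 * frame length, r the last TX rate and ef the frame error rate taken
	 * from fail_avg.  All terms are kept in <<ARITH_SHIFT fixed point so
	 * the computation stays in integer arithmetic.
	 */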
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
	return (u32)result;
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the target in the
		 * Path Request. We diverge from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_SN_VALID)) {
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     new_metric >= mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* the draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, so skip it for now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (compare_ether_addr(orig_addr, ta) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath = NULL;
	u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl;
	u32 orig_sn, target_sn, lifetime;
	bool reply = false;
	bool forward = true;

	/* Update target SN, if present */
	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	target_sn = PREQ_IE_TARGET_SN(preq_elem);
	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
	target_flags = PREQ_IE_TARGET_F(preq_elem);

	mhwmp_dbg("received PREQ from %pM", orig_addr);

	if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) {
		mhwmp_dbg("PREQ is for us");
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifmsh->last_sn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_sn_update)) {
			target_sn = ++ifmsh->sn;
			ifmsh->last_sn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(target_addr, sdata);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
					SN_LT(mpath->sn, target_sn)) {
				mpath->sn = target_sn;
				mpath->flags |= MESH_PATH_SN_VALID;
			} else if ((!(target_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
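				/* Intermediate reply: DO (destination only)
				 * is not requested and we hold an active path
				 * to the target, so answer on its behalf.  If
				 * RF (reply and forward) is set, convert the
				 * request to DO before forwarding so nodes
				 * further along do not also reply; otherwise
				 * stop forwarding here.
				 */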
				reply = true;
				metric = mpath->metric;
				target_sn = mpath->sn;
				if (target_flags & MP_F_RF)
					target_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.element_ttl;
		if (ttl != 0) {
			mhwmp_dbg("replying to the PREQ");
			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
				cpu_to_le32(orig_sn), 0, target_addr,
				cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, sdata);
		} else
			ifmsh->mshstats.dropped_frames_ttl++;
	}

	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		da = (mpath && mpath->is_root) ?
			mpath->rann_snd_addr : broadcast_addr;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_sn), target_flags, target_addr,
				cpu_to_le32(target_sn), da,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				sdata);
		ifmsh->mshstats.fwded_mcast++;
		ifmsh->mshstats.fwded_frames++;
	}
}


static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
	return rcu_dereference_protected(mpath->next_hop,
					 lockdep_is_held(&mpath->state_lock));
}


static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct mesh_path *mpath;
	u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;

	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
		/* destination, no forwarding required */
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(orig_addr, sdata);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_sn), 0, target_addr,
		cpu_to_le32(target_sn), next_hop, hopcount,
		ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl;
	u8 *ta, *target_addr;
	u32 target_sn;
	u16 target_rcode;

	ta = mgmt->sa;
	ttl = PERR_IE_TTL(perr_elem);
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
	target_sn = PERR_IE_TARGET_SN(perr_elem);
	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);
	if (mpath) {
		struct sta_info *sta;

		spin_lock_bh(&mpath->state_lock);
		sta = next_hop_deref_protected(mpath);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    compare_ether_addr(ta, sta->sta.addr) == 0 &&
		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
		    SN_GT(target_sn, mpath->sn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->sn = target_sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
					   cpu_to_le16(target_rcode),
					   broadcast_addr, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
				struct ieee80211_mgmt *mgmt,
				struct ieee80211_rann_ie *rann)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl, flags, hopcount;
	u8 *orig_addr;
	u32 orig_sn, metric;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	bool root_is_gate;

	ttl = rann->rann_ttl;
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	flags = rann->rann_flags;
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
	orig_addr = rann->rann_addr;
	orig_sn = rann->rann_seq;
	hopcount = rann->rann_hopcount;
	hopcount++;
	metric = rann->rann_metric;

	/*  Ignore our own RANNs */
	if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
		return;

	mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
			orig_addr, mgmt->sa, root_is_gate);

	rcu_read_lock();
	mpath = mesh_path_lookup(orig_addr, sdata);
	if (!mpath) {
		mesh_path_add(orig_addr, sdata);
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (!mpath) {
			rcu_read_unlock();
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			return;
		}
	}

	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
	     time_after(jiffies, mpath->exp_time - 1*HZ)) &&
	     !(mpath->flags & MESH_PATH_FIXED)) {
		mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
							       orig_addr);
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
	}

	if (mpath->sn < orig_sn) {
		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
				       cpu_to_le32(orig_sn),
				       0, NULL, 0, broadcast_addr,
				       hopcount, ttl, cpu_to_le32(interval),
				       cpu_to_le32(metric + mpath->metric),
				       0, sdata);
		mpath->sn = orig_sn;
	}

	/* Using individually addressed PREQ for root node */
	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
	mpath->is_root = true;

	if (root_is_gate)
		mesh_path_add_gate(mpath);

	rcu_read_unlock();
}


void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;
	struct sta_info *sta;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			len - baselen, &elems);

	if (elems.preq) {
		if (elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
						      MPATH_PREQ);
		if (last_hop_metric)
			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
						last_hop_metric);
	}
	if (elems.prep) {
		if (elems.prep_len != 31)
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
						      MPATH_PREP);
		if (last_hop_metric)
			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
						last_hop_metric);
	}
	if (elems.perr) {
		if (elems.perr_len != 15)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
	}
	if (elems.rann)
		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within an RCU read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
	if (!preq_node) {
		mhwmp_dbg("could not allocate PREQ node");
		return;
	}

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			mhwmp_dbg("PREQ node queue full");
		return;
	}

	spin_lock(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
		spin_unlock(&mpath->state_lock);
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	mpath->flags |= MESH_PATH_REQ_QUEUED;
	spin_unlock(&mpath->state_lock);

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid long wait if we did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, sdata);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags = MP_F_DO;
	else
		target_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
			cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
			cpu_to_le32(mpath->sn), da, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifmsh->preq_id++), sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/* mesh_nexthop_resolve - lookup next hop for given skb and start path
 * discovery if no forwarding information is found.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
 */
int mesh_nexthop_resolve(struct sk_buff *skb,
			 struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mesh_path *mpath;
	struct sk_buff *skb_to_free = NULL;
	u8 *target_addr = hdr->addr3;
	int err = 0;

	rcu_read_lock();
	err = mesh_nexthop_lookup(skb, sdata);
	if (!err)
		goto endlookup;

	/* no nexthop found, start resolving */
	mpath = mesh_path_lookup(target_addr, sdata);
	if (!mpath) {
		mesh_path_add(target_addr, sdata);
		mpath = mesh_path_lookup(target_addr, sdata);
		if (!mpath) {
			mesh_path_discard_frame(skb, sdata);
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (!(mpath->flags & MESH_PATH_RESOLVING))
		mesh_queue_preq(mpath, PREQ_Q_F_START);

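	/* Buffer the frame until the path resolves; if this destination's
	 * queue is already full, drop its oldest pending frame to make room.
	 */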
	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
		skb_to_free = skb_dequeue(&mpath->frame_queue);

	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	skb_queue_tail(&mpath->frame_queue, skb);
	err = -ENOENT;
	if (skb_to_free)
		mesh_path_discard_frame(skb_to_free, sdata);

endlookup:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
 * this function is considered "using" the associated mpath, so preempt a path
 * refresh if this mpath expires soon.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise.
 */
int mesh_nexthop_lookup(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct sta_info *next_hop;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *target_addr = hdr->addr3;
	int err = -ENOENT;

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);

	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
		goto endlookup;

	if (time_after(jiffies,
		       mpath->exp_time -
		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
	    !compare_ether_addr(sdata->vif.addr, hdr->addr4) &&
	    !(mpath->flags & MESH_PATH_RESOLVING) &&
	    !(mpath->flags & MESH_PATH_FIXED))
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);

	next_hop = rcu_dereference(mpath->next_hop);
	if (next_hop) {
		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		err = 0;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

void mesh_path_timer(unsigned long data)
{
	struct mesh_path *mpath = (void *) data;
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	int ret;

	if (sdata->local->quiescing)
		return;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING))) {
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
		spin_unlock_bh(&mpath->state_lock);
	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
		spin_unlock_bh(&mpath->state_lock);
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		spin_unlock_bh(&mpath->state_lock);
		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
			ret = mesh_path_send_to_gates(mpath);
			if (ret)
				mhwmp_dbg("no gate was reachable");
		} else
			mesh_path_flush_pending(mpath);
	}
}

void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	u8 flags;

	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
			? RANN_FLAG_IS_GATE : 0;
	mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
			       cpu_to_le32(++ifmsh->sn),
			       0, NULL, 0, broadcast_addr,
			       0, sdata->u.mesh.mshcfg.element_ttl,
			       cpu_to_le32(interval), 0, 0, sdata);
}