/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "wme.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
#define mhwmp_dbg(fmt, args...) \
	pr_debug("Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...)   do { (void)(0); } while (0)
#endif

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8
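/* Fixed-point scaling used by the airtime link metric computation below:
 * intermediate values carry ARITH_SHIFT (8) fractional bits. */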

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2
/* Unknown Sequence Number */
#define MP_F_USN    0x01
/* Reason code Present */
#define MP_F_RCODE  0x02

static void mesh_queue_preq(struct mesh_path *, u8);

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le16(preq_elem + offset);
}

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
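/* Bit 6 of the HWMP flags field signals that the element carries the
 * optional 6-byte address extension (external address); fields located
 * after it move up by 6 bytes, which is what the 'ae' argument of the
 * u32/u16 field accessors above implements. */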
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x) 	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x) 	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x) 	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x)	(x + 3)
#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)

#define PERR_IE_TTL(x)		(*(x))
#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
#define PERR_IE_TARGET_ADDR(x)	(x + 3)
#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)

#define MSEC_TO_TU(x) ((x) * 1000 / 1024)
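/* A TU (802.11 time unit) is 1024 microseconds, hence the 1000/1024
 * factor when converting milliseconds to TUs. */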
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)
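/* HWMP sequence numbers are compared modulo 2^32, so the comparison is
 * still correct across a wraparound: e.g. SN_GT(1, 0xfffffffe) is true. */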

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR,
	MPATH_RANN
};

static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
		__le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
		__le32 lifetime, __le32 metric, __le32 preq_id,
		struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg("sending PREQ to %pM", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg("sending PREP to %pM", target);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg("sending RANN from %pM", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
		break;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			memcpy(pos, &preq_id, 4);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}
	memcpy(pos, &lifetime, 4);	/* interval for RANN */
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &target_sn, 4);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		memcpy(pos, &orig_sn, 4);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}


/*  Headroom is not adjusted.  Caller should ensure that skb has sufficient
 *  headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	skb_set_mac_header(skb, 0);
	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, 0);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	ieee80211_set_qos_hdr(sdata, skb);
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 */
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
		       __le16 target_rcode, const u8 *ra,
		       struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);
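	/* PERR generation is rate-limited: if the previous PERR was sent less
	 * than dot11MeshHWMPperrMinInterval TUs ago (tracked in next_perr,
	 * updated below), ask the caller to try again later. */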

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/*
	 * flags field: the Unknown Sequence Number bit is set when we do not
	 * know the target's sequence number, and the Reason Code bit is set
	 * when a reason code is carried
	 */
	*pos = 0;
	if (!target_sn)
		*pos |= MP_F_USN;
	if (target_rcode)
		*pos |= MP_F_RCODE;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &target_sn, 4);
	pos += 4;
	memcpy(pos, &target_rcode, 2);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}

void ieee80211s_update_metric(struct ieee80211_local *local,
		struct sta_info *stainfo, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int failed;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

	/* moving average, scaled to 100 */
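	/* i.e. roughly fail_avg <- 0.8 * fail_avg + 20 * failed, an
	 * exponential moving average of the failure rate in percent; once it
	 * exceeds 95 the peer link is declared broken below. */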
	stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
	if (stainfo->fail_avg > 95)
		mesh_plink_broken(stainfo);
}

static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct rate_info rinfo;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	if (sta->fail_avg >= 100)
		return MAX_METRIC;

	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
	rate = cfg80211_calculate_bitrate(&rinfo);
	if (WARN_ON(!rate))
		return MAX_METRIC;

	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
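	/* The computation below is a fixed-point (ARITH_SHIFT fractional
	 * bits) approximation of the 802.11s airtime metric:
	 *   ca = (O + Bt / r) * 1 / (1 - ef)
	 * with O the channel access overhead (device_constant), Bt the test
	 * frame length, r the current tx rate and ef the frame error rate
	 * estimated from fail_avg. */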
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
	return (u32)result;
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
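	/* Information from the element only replaces what we already have
	 * ("fresh_info") if the existing path is not fixed and does not hold
	 * a valid sequence number that is newer, or equally new with a metric
	 * that is no worse, than the one advertised in the frame. */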
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the target in the
		 * Path Request. We diverge from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_SN_VALID)) {
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     new_metric >= mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved too, but there does
			 * not seem to be any use for it, skipping for now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (ether_addr_equal(orig_addr, ta))
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath = NULL;
	u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl;
	u32 orig_sn, target_sn, lifetime;
	bool reply = false;
	bool forward = true;
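	/* A PREQ addressed to us is answered with a PREP.  For other targets
	 * an intermediate reply may be generated if an active path exists and
	 * the Destination Only flag is clear; if Reply-and-Forward is set the
	 * PREQ is still forwarded, otherwise forwarding stops here. */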

	/* Update target SN, if present */
	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	target_sn = PREQ_IE_TARGET_SN(preq_elem);
	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
	target_flags = PREQ_IE_TARGET_F(preq_elem);

	mhwmp_dbg("received PREQ from %pM", orig_addr);

	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
		mhwmp_dbg("PREQ is for us");
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifmsh->last_sn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_sn_update)) {
			target_sn = ++ifmsh->sn;
			ifmsh->last_sn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(target_addr, sdata);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
					SN_LT(mpath->sn, target_sn)) {
				mpath->sn = target_sn;
				mpath->flags |= MESH_PATH_SN_VALID;
			} else if ((!(target_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				target_sn = mpath->sn;
				if (target_flags & MP_F_RF)
					target_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.element_ttl;
		if (ttl != 0) {
			mhwmp_dbg("replying to the PREQ");
			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
				cpu_to_le32(orig_sn), 0, target_addr,
				cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, sdata);
		} else
			ifmsh->mshstats.dropped_frames_ttl++;
	}

	if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		da = (mpath && mpath->is_root) ?
			mpath->rann_snd_addr : broadcast_addr;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_sn), target_flags, target_addr,
				cpu_to_le32(target_sn), da,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				sdata);
		if (!is_multicast_ether_addr(da))
			ifmsh->mshstats.fwded_unicast++;
		else
			ifmsh->mshstats.fwded_mcast++;
		ifmsh->mshstats.fwded_frames++;
	}
}


static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
	return rcu_dereference_protected(mpath->next_hop,
					 lockdep_is_held(&mpath->state_lock));
}


static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;
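	/* A PREP travels back along the reverse path set up by the
	 * corresponding PREQ: unless we are its final destination, it is
	 * forwarded as unicast to the next hop stored for the PREP
	 * originator, with the TTL decremented and the hop count bumped. */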

	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		/* destination, no forwarding required */
		return;

	if (!ifmsh->mshcfg.dot11MeshForwarding)
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(orig_addr, sdata);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_sn), 0, target_addr,
		cpu_to_le32(target_sn), next_hop, hopcount,
		ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl;
	u8 *ta, *target_addr;
	u32 target_sn;
	u16 target_rcode;
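	/* A PERR is only acted upon if it was sent by the station we
	 * currently use as next hop towards the broken target and it carries
	 * a newer sequence number (or we hold no valid one): the path is then
	 * deactivated and, if forwarding is enabled, the PERR re-broadcast. */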

	ta = mgmt->sa;
	ttl = PERR_IE_TTL(perr_elem);
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
	target_sn = PERR_IE_TARGET_SN(perr_elem);
	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);
	if (mpath) {
		struct sta_info *sta;

		spin_lock_bh(&mpath->state_lock);
		sta = next_hop_deref_protected(mpath);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    ether_addr_equal(ta, sta->sta.addr) &&
		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
		    SN_GT(target_sn, mpath->sn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->sn = target_sn;
			spin_unlock_bh(&mpath->state_lock);
			if (!ifmsh->mshcfg.dot11MeshForwarding)
				goto endperr;
			mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
					   cpu_to_le16(target_rcode),
					   broadcast_addr, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
endperr:
	rcu_read_unlock();
}

static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
				struct ieee80211_mgmt *mgmt,
				struct ieee80211_rann_ie *rann)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct mesh_path *mpath;
	u8 ttl, flags, hopcount;
	u8 *orig_addr;
	u32 orig_sn, metric, metric_txsta, interval;
	bool root_is_gate;
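	/* RANNs are flooded by root mesh STAs.  Receiving one creates or
	 * refreshes the path towards the root, queues a PREQ to it when that
	 * path needs (re)discovery, and forwards the RANN if it carries a
	 * newer sequence number or, for the same one, a better metric. */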

	ttl = rann->rann_ttl;
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	flags = rann->rann_flags;
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
	orig_addr = rann->rann_addr;
	orig_sn = le32_to_cpu(rann->rann_seq);
	interval = le32_to_cpu(rann->rann_interval);
	hopcount = rann->rann_hopcount;
	hopcount++;
	metric = le32_to_cpu(rann->rann_metric);

	/*  Ignore our own RANNs */
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		return;

	mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
			orig_addr, mgmt->sa, root_is_gate);

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	metric_txsta = airtime_link_metric_get(local, sta);

	mpath = mesh_path_lookup(orig_addr, sdata);
	if (!mpath) {
		mesh_path_add(orig_addr, sdata);
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (!mpath) {
			rcu_read_unlock();
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			return;
		}
	}

	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
	     time_after(jiffies, mpath->exp_time - 1*HZ)) &&
	     !(mpath->flags & MESH_PATH_FIXED)) {
		mhwmp_dbg("time to refresh root mpath %pM", orig_addr);
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
	}

	if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
	   metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
				       cpu_to_le32(orig_sn),
				       0, NULL, 0, broadcast_addr,
				       hopcount, ttl, cpu_to_le32(interval),
				       cpu_to_le32(metric + metric_txsta),
				       0, sdata);
		mpath->sn = orig_sn;
		mpath->rann_metric = metric + metric_txsta;
		/* Record the RANN sender's address so that individually
		 * addressed PREQs can be sent towards the root mesh STA */
		memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
	}

	mpath->is_root = true;

	if (root_is_gate)
		mesh_path_add_gate(mpath);

	rcu_read_unlock();
}


void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;
	struct sta_info *sta;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			len - baselen, &elems);

	if (elems.preq) {
		if (elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
						      MPATH_PREQ);
		if (last_hop_metric)
			hwmp_preq_frame_process(sdata, mgmt, elems.preq,
						last_hop_metric);
	}
	if (elems.prep) {
		if (elems.prep_len != 31)
854 855
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
						      MPATH_PREP);
		if (last_hop_metric)
			hwmp_prep_frame_process(sdata, mgmt, elems.prep,
						last_hop_metric);
	}
	if (elems.perr) {
		if (elems.perr_len != 15)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(sdata, mgmt, elems.perr);
	}
	if (elems.rann)
		hwmp_rann_frame_process(sdata, mgmt, elems.rann);
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
	if (!preq_node) {
		mhwmp_dbg("could not allocate PREQ node");
		return;
	}

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			mhwmp_dbg("PREQ node queue full");
		return;
	}

	spin_lock(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
		spin_unlock(&mpath->state_lock);
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	mpath->flags |= MESH_PATH_REQ_QUEUED;
	spin_unlock(&mpath->state_lock);

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid a long wait if we did not send PREQs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, sdata);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags = MP_F_DO;
	else
		target_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
			cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
			cpu_to_le32(mpath->sn), da, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifmsh->preq_id++), sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/* mesh_nexthop_resolve - lookup next hop for given skb and start path
 * discovery if no forwarding information is found.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
 */
int mesh_nexthop_resolve(struct sk_buff *skb,
			 struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mesh_path *mpath;
	struct sk_buff *skb_to_free = NULL;
	u8 *target_addr = hdr->addr3;
	int err = 0;

	rcu_read_lock();
	err = mesh_nexthop_lookup(skb, sdata);
	if (!err)
		goto endlookup;

	/* no nexthop found, start resolving */
	mpath = mesh_path_lookup(target_addr, sdata);
	if (!mpath) {
		mesh_path_add(target_addr, sdata);
		mpath = mesh_path_lookup(target_addr, sdata);
		if (!mpath) {
			mesh_path_discard_frame(skb, sdata);
			err = -ENOSPC;
			goto endlookup;
		}
	}
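	/* Path discovery is pending or started below: buffer the frame on the
	 * mpath, dropping the oldest entry once MESH_FRAME_QUEUE_LEN frames
	 * are queued, and return -ENOENT so the caller knows the skb has been
	 * consumed. */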

	if (!(mpath->flags & MESH_PATH_RESOLVING))
		mesh_queue_preq(mpath, PREQ_Q_F_START);

	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
		skb_to_free = skb_dequeue(&mpath->frame_queue);

	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	skb_queue_tail(&mpath->frame_queue, skb);
	err = -ENOENT;
	if (skb_to_free)
		mesh_path_discard_frame(skb_to_free, sdata);

endlookup:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
 * this function is considered "using" the associated mpath, so preempt a path
 * refresh if this mpath expires soon.
 *
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise.
 */
int mesh_nexthop_lookup(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct sta_info *next_hop;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *target_addr = hdr->addr3;
	int err = -ENOENT;

	rcu_read_lock();
	mpath = mesh_path_lookup(target_addr, sdata);

	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
		goto endlookup;

	if (time_after(jiffies,
		       mpath->exp_time -
		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
	    ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
	    !(mpath->flags & MESH_PATH_RESOLVING) &&
	    !(mpath->flags & MESH_PATH_FIXED))
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);

	next_hop = rcu_dereference(mpath->next_hop);
	if (next_hop) {
		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		err = 0;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

void mesh_path_timer(unsigned long data)
{
	struct mesh_path *mpath = (void *) data;
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	int ret;

	if (sdata->local->quiescing)
		return;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING))) {
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
		spin_unlock_bh(&mpath->state_lock);
	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
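		/* Retry discovery with binary exponential backoff: each failed
		 * attempt doubles the discovery timeout, until
		 * dot11MeshHWMPmaxPREQretries attempts have been made. */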
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
		spin_unlock_bh(&mpath->state_lock);
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		spin_unlock_bh(&mpath->state_lock);
		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
			ret = mesh_path_send_to_gates(mpath);
			if (ret)
				mhwmp_dbg("no gate was reachable");
		} else
			mesh_path_flush_pending(mpath);
	}
}

void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	u8 flags;

	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
			? RANN_FLAG_IS_GATE : 0;
	mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
			       cpu_to_le32(++ifmsh->sn),
			       0, NULL, 0, broadcast_addr,
			       0, sdata->u.mesh.mshcfg.element_ttl,
			       cpu_to_le32(interval), 0, 0, sdata);
}