/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

/* An mpath is expired if it is active, its expiry time has passed, and it has
 * not been fixed via mesh_path_fix_nexthop().
 */
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock is taken as a writer by the table-grow function and as readers by
 * the functions that add or delete nodes. Plain table reads (i.e. lookups)
 * are already well protected by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	/* Readdress frames already queued on this path to the new next hop */
	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
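
/*
 * Example (an illustrative sketch, not part of the original file): lookups
 * are only valid inside an RCU read-side critical section, e.g.:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */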

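/**
 * mpp_path_lookup - look up a proxy path in the mesh proxy path table
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */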
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index of the path entry to return
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	might_sleep();

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;

err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
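
/*
 * Example (an illustrative sketch, not part of the original file): path
 * discovery creates an entry on demand and treats a racing add as success:
 *
 *	err = mesh_path_add(dst, sdata);
 *	if (err && err != -EEXIST)
 *		return err;
 */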


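/**
 * mpp_path_add - allocate and add a new proxy path to the mesh proxy path table
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 * @mpp: hardware address (ETH_ALEN length) of the mesh proxy point serving @dst
 * @sdata: local subif
 *
 * Returns: 0 on success
 */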
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	might_sleep();

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mpp_paths;
		newtbl = mesh_table_grow(mpp_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mpp_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;

err_exists:
	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					sdata->dev->broadcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

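/**
 * mesh_path_flush - delete all mesh paths of a given interface
 * @sdata: local subif whose mesh paths should be deleted
 */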
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

/* RCU callback that frees a path node once no readers can still see it */
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: destination address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself; it must NOT be
 * called with that lock already held
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
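
/*
 * Example (an illustrative sketch, not part of the original file, assuming
 * the sta_info_get() helper of this kernel era): fixing a user-configured
 * next hop looks like:
 *
 *	rcu_read_lock();
 *	sta = sta_info_get(sdata->local, next_hop_addr);
 *	if (sta)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */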

/* Unlink a path node from its table and free it, optionally with its mpath */
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

/* Link an existing path node into @newtbl while the table is being grown */
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

/* Allocate and set up the mesh path and mesh proxy path tables */
int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}

/**
 * mesh_path_expire - delete expired mesh paths of a given interface
 * @sdata: local subif whose expired paths should be deleted
 *
 * Paths that are neither being resolved nor fixed and whose expiry time has
 * passed are removed from the table.
 */
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

/* Free both path tables, including all remaining mesh_path entries */
void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}