/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* Initially there will be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

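/* A path is expired when it is marked active, is not fixed and its
 * expiration time has passed. */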
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
int mpp_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)


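/* Allocate a table with 2^size_order hash buckets, one write lock per bucket
 * and a random hash seed.  Returns NULL on allocation failure; the caller
 * fills in the free_node/copy_node callbacks. */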
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

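/* Free the table structure itself; all nodes must have been freed already. */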
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

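/* Free every node in the table and, if free_leafs is set, also the mesh_path
 * leaves and the list of known gates. */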
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

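/* Copy all entries from oldtbl into the larger newtbl.  Returns -EAGAIN if
 * oldtbl is not yet full enough to be worth growing, -ENOMEM if a copy fails. */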
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}


/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

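/* Rewrite a queued frame so it can be forwarded through a mesh gate: add the
 * Address Extension field if it is missing and update the next hop,
 * transmitter and destination addresses for the gate path. */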
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 *
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

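/* Look up dst in the given table, clearing the active flag of expired
 * entries before returning them. */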
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mpp_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx)
			return node->mpath;
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath to the list of mesh gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new mesh path on success, or an ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}

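/* RCU callback used after growing a table: free the old table but keep the
 * leaves, which now belong to the new table. */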
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

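/* Double the size of the mesh path table; run from the mesh work queue when
 * the table has grown too crowded. */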
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);

	mpp_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

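/* RCU callback: free an mpath_node and its mesh_path once no reader can
 * still be using them. */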
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

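/* Delete every path in the given table that belongs to sdata. */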
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

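/* copy_node callback: allocate a new mpath_node for an existing mesh_path
 * and hash it into the right bucket of newtbl. */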
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

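/* Delete all paths of sdata that have expired and are neither fixed nor
 * currently being resolved. */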
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}