/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take
 * the read lock or we risk operating on an old table.  The write lock is only
 * needed when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
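
/*
 * Reader-side sketch of the locking scheme above (this mirrors what
 * mesh_path_add() below actually does):
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	... add or remove nodes in the chain ...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 */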


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
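
/*
 * Illustrative use (see mesh_plink_broken() below): dereference the table
 * once into a plain variable, then iterate under RCU.
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i) {
 *		...
 *	}
 *	rcu_read_unlock();
 */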


static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
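
/*
 * Caller sketch: take the state_lock around the assignment, as
 * mesh_path_fix_nexthop() at the bottom of this file does.
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, next_hop);
 *	spin_unlock_bh(&mpath->state_lock);
 */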

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
					  struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
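
/*
 * Caller sketch (illustrative, not lifted verbatim from any one caller):
 * the returned pointer is only valid inside the RCU read section.
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		...
 *	rcu_read_unlock();
 */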

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
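
/*
 * Illustrative call sequence (a sketch, not verbatim from the HWMP code):
 * create the path, then look it up again under RCU before using it.
 *
 *	if (mesh_path_add(dst, sdata) == 0) {
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup(dst, sdata);
 *		if (mpath)
 *			...
 *		rcu_read_unlock();
 *	}
 */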

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}