/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
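/* Once a table holds more than MEAN_CHAIN_LEN entries per bucket,
 * mesh_path_add()/mpp_path_add() schedule MESH_WORK_GROW_*_TABLE work and the
 * table is reallocated with twice as many buckets; see mesh_table_grow() and
 * mesh_mpath_table_grow()/mesh_mpp_table_grow() below.
 */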

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take
 * the read lock or we risk operating on an old table.  The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
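
/*
 * Locking pattern used throughout this file (sketch only; mesh_path_add() and
 * mesh_mpath_table_grow() below are the canonical examples).
 *
 * Adding or removing an entry:
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	... add or remove the node ...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 *
 * Resizing a table:
 *
 *	write_lock_bh(&pathtbl_resize_lock);
 *	... allocate the new table and swap the RCU pointer ...
 *	write_unlock_bh(&pathtbl_resize_lock);
 */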


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
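
/*
 * Typical use of for_each_mesh_entry() (sketch, mirroring mesh_plink_broken()
 * and mesh_path_expire() below); the table is walked under rcu_read_lock(),
 * and do_something() merely stands in for the per-entry work:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		do_something(node->mpath);
 *	rcu_read_unlock();
 */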


static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
					  struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
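
/*
 * Caller sketch (illustrative only, modelled on the next-hop resolution in
 * mesh_hwmp.c): the lookup and the returned mpath are only valid inside an
 * RCU read section.
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */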

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
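
/*
 * Typical create-if-missing pattern in callers such as the HWMP path
 * discovery code (sketch, not a verbatim copy of mesh_hwmp.c):
 *
 *	mpath = mesh_path_lookup(orig_addr, sdata);
 *	if (!mpath) {
 *		mesh_path_add(orig_addr, sdata);
 *		mpath = mesh_path_lookup(orig_addr, sdata);
 *		if (!mpath)
 *			return;
 *	}
 */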

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself and must not be
 * called with that lock already held
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);


	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}