/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
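
/* Illustration: with INIT_PATHS_SIZE_ORDER 2 the initial table has 4 buckets,
 * so the grow condition checked in mesh_table_grow() -- entries >=
 * mean_chain_len * buckets -- is first met at MEAN_CHAIN_LEN * 4 = 8 entries.
 */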

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
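
/*
 * Sketch of the pattern described above (all names are from this file):
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	... add or remove a node ...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 *
 * Resizing (mesh_mpath_table_grow()) takes write_lock_bh() on this lock
 * instead, which excludes all concurrent add/delete paths.
 */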


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
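
/*
 * Example (sketch of the pattern used by callers below): dereference the
 * table into a local variable first, then iterate:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		mpath = node->mpath;
 *	rcu_read_unlock();
 */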


static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
			gate_mpath->dst,
			skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
					  struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				compare_ether_addr(dst, mpath->dst) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
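
/*
 * Example (sketch, hypothetical caller): mesh_path_lookup() above and
 * mpp_path_lookup() below must run inside an RCU read-side section, e.g.:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		... use mpath->next_hop ...
 *	rcu_read_unlock();
 */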

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mesh path to the table of known gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @addr: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

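/**
 * mpp_path_add - allocate and add a new mesh proxy path to the table
 * @dst: destination address of the proxied node (ETH_ALEN length)
 * @mpp: mesh proxy address through which @dst is reachable
 * @sdata: local subif
 *
 * Returns: 0 on success
 */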
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(addr, mpath->dst) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}