/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held only for a short time,
 *    long-lived entries are moved to a second list, handled by the
 *    garbage-collection task fired from a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */
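
/*
 * Rough usage sketch of the scheme above (hypothetical caller shown for
 * illustration only, not an API defined in this file): a protocol that
 * drops an entry from its own cache hands it over with
 *
 *	__dst_free(dst);	// usually via the dst_free() wrapper in net/dst.h
 *
 * which links it onto dst_garbage.list under dst_garbage.lock and, if the
 * GC interval has backed off, pulls the next dst_gc_work run forward.
 * dst_gc_task() later splices that list, destroys entries whose __refcnt
 * reached zero and parks still-referenced ones on dst_busy_list for a
 * later pass.
 */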
#if RT_CACHE_DEBUG >= 2
static atomic_t			 dst_total = ATOMIC_INIT(0);
#endif

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry 	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
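/*
 * timer_inc/timer_expires implement the adaptive GC interval used by
 * dst_gc_task(): when a pass frees few entries, timer_expires grows by
 * timer_inc (capped at DST_GC_MAX) and timer_inc itself grows by
 * DST_GC_INC; when real work is done, or __dst_free() sees the interval
 * has backed off, both fall back toward DST_GC_MIN/DST_GC_INC.
 */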
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry * dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry         *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int    delayed = 0;
	int    work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
	ktime_t time_start = ktime_get();
	struct timespec elapsed;
#endif

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the future
		 * then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
		" expires: %lu elapsed: %lu us\n",
		atomic_read(&dst_total), delayed, work_performed,
		expires,
		elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}

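/*
 * dst_discard() is installed as both ->input and ->output on entries that
 * must no longer carry traffic (see ___dst_free() and dst_ifdown()); any
 * packet reaching such a dst is simply dropped.
 */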
int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);

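/*
 * Allocate an entry from the protocol's dst cache.  If the protocol has a
 * gc() hook and its entry count exceeds gc_thresh, garbage collection is
 * attempted first; the allocation fails when gc() cannot make room.
 */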
void * dst_alloc(struct dst_ops * ops)
{
	struct dst_entry * dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}

static void ___dst_free(struct dst_entry * dst)
{
	/* The first case (dev == NULL) is required when a
	   protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst->output = dst_discard;
	}
	dst->obsolete = 2;
}

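/*
 * Queue a dst for delayed destruction: mark it obsolete, chain it onto
 * dst_garbage.list and, if the GC interval has backed off, pull the next
 * dst_gc_work run forward to DST_GC_MIN.
 */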
void __dst_free(struct dst_entry * dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}

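/*
 * Tear down a dst and, for DST_NOHASH chains, any unreferenced children.
 * A still-referenced NOHASH child is returned to the caller so it can be
 * queued for later freeing (see dst_gc_task()); otherwise NULL is returned.
 */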
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}

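/*
 * Drop one reference.  The barrier orders this CPU's prior stores to the
 * entry before the decrement becomes visible; a negative result indicates
 * an unbalanced release, hence the WARN_ON().
 */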
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		smp_mb__before_atomic_dec();
		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
	}
}
EXPORT_SYMBOL(dst_release);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
	}
}

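/*
 * Netdevice notifier: on NETDEV_DOWN or NETDEV_UNREGISTER, walk the busy
 * list under dst_gc_mutex, splice in the pending garbage list, and point
 * every affected entry away from the dying device via dst_ifdown().
 */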
static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);