/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * for TOS reconstruction.
 *
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed into the final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

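/* Map a queue to its bucket index using the protocol's hash function
 * and the current random seed.
 */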
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

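/* Pick a fresh hash secret and relink every queue into the bucket the
 * new secret maps it to.  Rate-limited by INETFRAGS_MIN_REBUILD_INTERVAL.
 */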
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock below;
				 * if it's already locked, it will be released
				 * soon, since the other caller cannot be waiting
				 * for the hb lock that we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

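/* A queue may be evicted once per-netns fragment memory has reached the
 * low threshold, or unconditionally when low_thresh has been set to 0.
 */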
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

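/* Collect evictable queues from one bucket onto a private list under the
 * chain lock, then expire them with the lock released.  Returns the
 * number of queues handed to the expire callback.
 */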
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		fq->flags |= INET_FRAG_EVICTED;
		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) fq);

	return evicted;
}

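/* Deferred eviction: scan up to INETFRAGS_EVICT_BUCKETS buckets per run
 * (stopping early after INETFRAGS_EVICT_MAX evictions), remember where we
 * stopped, and rebuild the hash secret if that was requested.
 */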
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

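/* Kick the eviction worker unless a run is already pending. */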
static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

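/* Per-protocol setup: hash buckets, secret seqlock, eviction work and the
 * kmem cache used for queue allocation.
 */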
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

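/* Per-netns teardown: force eviction of every queue by zeroing the low
 * threshold, and loop until the memory counter has drained to zero.
 */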
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    percpu_counter_sum(&nf->mem))
		goto evict_again;

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

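/* Return the queue's bucket with its chain lock held, retrying if a
 * concurrent secret rebuild invalidated the hash we computed.
 */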
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

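/* Unhash a queue and mark it complete, under the bucket chain lock. */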
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

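/* Take a queue out of service: stop its timer and, unless it is already
 * complete, unhash it, dropping the references they held.
 */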
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

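/* Free one fragment skb, letting the protocol release per-skb state first. */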
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

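/* Final teardown of a dead queue: free every fragment, run the protocol
 * destructor and give the accounted memory back to the namespace.
 */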
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

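/* Insert a freshly allocated queue into its hash bucket.  On SMP another
 * CPU may have created the same queue first; in that case drop ours and
 * return the existing one instead.
 */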
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With an SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another cpu before
	 * we acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

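/* Allocate and initialise a new queue, or fail (and schedule eviction)
 * when the namespace is over its high memory threshold.
 */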
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

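/* Allocate a new queue and link it into the hash table. */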
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

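/* Look up the queue matching @key in the given bucket, creating one if
 * there is no match.  Returns ERR_PTR(-ENOBUFS) and, if allowed, requests
 * a secret rebuild when the chain has grown past INETFRAGS_MAXDEPTH.
 */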
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);