/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR of the ECN values of all fragments, apply the RFC 3168 5.3
 * requirements.
 * Value: 0xff if the frame should be dropped,
 *        otherwise 0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

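/* Map a fragment queue to one of the INETFRAGS_HASHSZ hash buckets using
 * the protocol-specific hashfn callback.
 */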
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

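/* Limit hash secret rebuilds to at most one per
 * INETFRAGS_MIN_REBUILD_INTERVAL.
 */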
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

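/* Pick a new hash secret and move every queue to the bucket it now hashes
 * to, under rnd_seqlock so that concurrent lookups can detect the change.
 */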
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock
				 * below: if it is already locked, it will be
				 * released soon, since its holder cannot be
				 * waiting for the hb lock that we've taken
				 * above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

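/* A queue may be evicted once it is not already queued for eviction and
 * the per-netns memory use is at or above low_thresh (a low_thresh of 0
 * means the namespace is being torn down).
 */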
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	if (!hlist_unhashed(&q->list_evictor))
		return false;

	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

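/* Walk one hash bucket, collect every queue that should be evicted while
 * holding the chain lock, then run the expire callbacks outside the lock.
 * Returns the number of queues evicted.
 */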
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

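/* Deferred eviction work: scan up to INETFRAGS_EVICT_BUCKETS buckets per
 * run, stop early once INETFRAGS_EVICT_MAX queues have been evicted, and
 * rebuild the hash secret if a rebuild was requested.
 */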
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = READ_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

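/* Kick the eviction worker unless it is already pending. */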
static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

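/* One-time setup for a protocol's fragment handling: hash buckets, the
 * eviction work item, the secret seqlock and the queue slab cache.
 */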
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

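/* Undo inet_frags_init(): stop the eviction worker and free the cache. */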
void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

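/* Tear down per-namespace fragment state: drop low_thresh to zero and keep
 * evicting all buckets until no fragment memory remains charged, retrying
 * if a secret rebuild raced with the scan.
 */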
void inet_frags_exit_net(struct netns_frags *nf)
{
	struct inet_frags *f = nf->f;
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);

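/* Find and lock the bucket a queue currently hashes to, retrying if the
 * hash secret changed between computing the hash and taking the lock.
 */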
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, fq->net->f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

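/* Stop a queue's timer, mark it complete and unhash it, dropping the
 * references held by the timer and by the hash table.
 */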
void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq);
		refcount_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

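/* Final teardown of a completed queue: free all queued skbs, run the
 * protocol destructor, return the queue to the slab cache and uncharge
 * the memory accounting.
 */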
void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	f = nf->f;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

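/* Publish a freshly allocated queue in its hash bucket.  If another CPU
 * raced us and already inserted a matching queue, drop ours and return
 * the existing one instead.
 */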
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* On SMP we have to recheck the hash chain, because a matching
	 * entry could have been created on another CPU before we
	 * acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

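/* Allocate and initialise a new fragment queue, or fail (and kick the
 * eviction worker) when the namespace is above its high_thresh limit.
 */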
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

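/* Allocate a queue for @arg and publish it in the hash table. */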
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

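/* Look up the queue matching @key in bucket @hash and take a reference on
 * it, creating one if needed.  If the chain has grown beyond
 * INETFRAGS_MAXDEPTH, request a secret rebuild and return
 * ERR_PTR(-ENOBUFS) instead.
 */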
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

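/* Emit a ratelimited debug message when inet_frag_find() gave up because
 * a hash chain exceeded INETFRAGS_MAXDEPTH.
 */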
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);