/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

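/* Map a queue to one of the INETFRAGS_HASHSZ buckets using the
 * protocol-provided hash function.
 */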
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

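/* Limit hash secret rebuilds to one per INETFRAGS_MIN_REBUILD_INTERVAL. */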
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

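/* Pick a new hash secret and move every queue into the bucket that matches
 * its new hash value.  Runs under the rnd_seqlock write side so that
 * concurrent lookups can detect the rehash and retry.
 */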
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on the hb_dest lock below: if it is
				 * already locked, it will be released soon, since the
				 * other caller cannot be waiting for the hb lock
				 * that we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

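/* A queue becomes evictable once its netns is above low_thresh, or
 * unconditionally when low_thresh is 0 (netns teardown).
 */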
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

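/* Evict queues from one hash bucket: collect them on a private list under
 * the chain lock, then run the protocol's frag_expire handler for each of
 * them with the lock released.
 */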
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

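/* Deferred eviction work: scan up to INETFRAGS_EVICT_BUCKETS buckets per
 * run, stop early after INETFRAGS_EVICT_MAX evictions, remember where we
 * left off, and rebuild the hash secret if one was requested.
 */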
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

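/* Set up a protocol's fragment state: hash buckets and their locks, the
 * eviction work item, the secret seqlock and the queue kmem cache.
 */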
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

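/* Per-netns teardown: force low_thresh to 0 so every queue is evictable,
 * then evict all buckets until no fragment memory remains, retrying if a
 * secret rebuild raced with us.
 */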
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);

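/* Return the bucket for @fq with its chain lock held.  If the hash secret
 * changed while we were taking the lock, drop it and retry so the bucket
 * always matches the current secret.
 */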
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

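/* Stop a queue: cancel its timer, unlink it from its bucket and mark it
 * complete, dropping the references held by the timer and the hash table.
 */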
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		refcount_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

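/* Final teardown once the last reference is gone: free all queued skbs,
 * run the protocol destructor, free the queue itself and return the
 * accounted memory to the netns limit.
 */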
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

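/* Insert a freshly allocated queue into the hash.  On SMP another CPU may
 * have created a matching queue already; in that case drop ours and return
 * the existing one.
 */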
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP we have to recheck the hash table, because such an
	 * entry could have been created on another CPU before we
	 * acquired the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

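/* Allocate and initialise a new queue, charging f->qsize to the netns
 * memory accounting.  Returns NULL (and kicks the eviction worker) when
 * the netns is over its high threshold or the threshold is 0.
 */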
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

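/* Allocate a new queue for @arg and insert it into the hash table. */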
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

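/* Look up the queue matching @key in bucket @hash and take a reference on
 * it, creating it if it does not exist.  An overlong chain suggests a hash
 * collision attack, so request a secret rebuild and return -ENOBUFS
 * instead.
 */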
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);