#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 *  mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
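
/* Illustrative sketch (not part of this header): each protocol keeps one
 * struct netns_frags per network namespace, fills in the sysctl knobs and
 * then registers it with inet_frags_init_net() (declared below).  IPv4 does
 * this from its pernet init in net/ipv4/ip_fragment.c; the helper name and
 * the threshold values here are placeholders, not the kernel defaults.
 *
 *	static int __net_init example_frags_init_net(struct net *net)
 *	{
 *		net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *		net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *		net->ipv4.frags.timeout     = 30 * HZ;
 *
 *		inet_frags_init_net(&net->ipv4.frags);
 *		return 0;
 *	}
 */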

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_EVICTED	8
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};
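
/* Illustrative helper (not part of the original header; the name is
 * hypothetical): a reassembler usually considers a queue complete once both
 * the first and the last fragment have arrived and the accounted payload
 * ("meat") covers the full datagram length.  The IPv4 and IPv6 reassemblers
 * perform a check of this shape inline.
 */
static inline bool inet_frag_queue_complete_example(const struct inet_frag_queue *q)
{
	const __u8 both = INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN;

	/* all edges seen and the fragment payloads add up to the full length */
	return (q->last_in & both) == both && q->meat == q->len;
}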

#define INETFRAGS_HASHSZ	1024

/* Averaged per-bucket depth limit:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       (SKB_TRUESIZE(0) + sizeof(struct ipq or struct frag_queue)),
 *	       rounded up
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 * It's primarily a rebuild protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;

	struct work_struct	frags_work;
	unsigned int		next_bucket;
	unsigned long		last_rebuild_jiffies;
	bool			rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once (see the
	 * usage sketch after this struct).
	 */
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
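
/* Usage sketch (illustrative, hypothetical "foo" protocol): the owner fills
 * in the callbacks and calls inet_frags_init() once at boot; IPv4 wires this
 * up the same way in net/ipv4/ip_fragment.c.  Note how the hash function
 * seeds ->rnd lazily with net_get_random_once() on its first call, as the
 * comment above requires.
 *
 *	static unsigned int foo_hashfn(const struct inet_frag_queue *q)
 *	{
 *		const struct foo_frag_queue *fq;
 *
 *		net_get_random_once(&foo_frags.rnd, sizeof(foo_frags.rnd));
 *		fq = container_of(q, struct foo_frag_queue, q);
 *		return jhash_3words(fq->id, fq->saddr, fq->daddr, foo_frags.rnd);
 *	}
 *
 *	static int __init foo_frag_init(void)
 *	{
 *		foo_frags.hashfn	= foo_hashfn;
 *		foo_frags.match		= foo_frag_match;
 *		foo_frags.constructor	= foo_frag_ctor;
 *		foo_frags.destructor	= foo_frag_dtor;
 *		foo_frags.skb_free	= NULL;
 *		foo_frags.qsize		= sizeof(struct foo_frag_queue);
 *		foo_frags.frag_expire	= foo_frag_expire;
 *		inet_frags_init(&foo_frags);
 *		return 0;
 *	}
 */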

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
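
/* Lookup sketch (illustrative, hypothetical "foo" protocol): callers enter
 * inet_frag_find() with f->lock held for reading (the __releases() annotation
 * above documents that the lookup drops it) and must handle an ERR_PTR()
 * return, which is how an overlong hash chain (> INETFRAGS_MAXDEPTH) is
 * reported.  IPv4's ip_find() in net/ipv4/ip_fragment.c follows this shape.
 *
 *	struct inet_frag_queue *q;
 *
 *	read_lock(&foo_frags.lock);
 *	q = inet_frag_find(&net->foo.frags, &foo_frags, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 */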

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}
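
/* Expiry sketch (illustrative, hypothetical "foo" protocol): a typical
 * ->frag_expire timer handler unlinks the queue with inet_frag_kill() under
 * q->lock, then drops the timer's reference with inet_frag_put(); IPv4's
 * ip_expire() follows this pattern.
 *
 *	static void foo_frag_expire(unsigned long data)
 *	{
 *		struct inet_frag_queue *q = (struct inet_frag_queue *)data;
 *
 *		spin_lock(&q->lock);
 *		if (!(q->last_in & INET_FRAG_COMPLETE))
 *			inet_frag_kill(q, &foo_frags);
 *		spin_unlock(&q->lock);
 *
 *		inet_frag_put(q, &foo_frags);
 *	}
 */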

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
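
/* Accounting sketch (illustrative): the per-netns percpu counter tracks the
 * truesize of every queued fragment plus the queue structures themselves.
 * A protocol typically checks the limit before creating new state and
 * charges/uncharges fragments as they are queued and freed, roughly:
 *
 *	if (frag_mem_limit(nf) > nf->high_thresh)
 *		goto drop;			(over the sysctl limit)
 *
 *	add_frag_mem_limit(q, skb->truesize);	(fragment queued)
 *	...
 *	sub_frag_mem_limit(q, skb->truesize);	(fragment freed/reassembled)
 */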

/* RFC 3168 support:
 * We want to check the ECN values of all fragments, to detect invalid
 * combinations.  In ipq->ecn, we store the OR of each fragment's
 * ip4_frag_ecn() value (usage sketch below).
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
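
/* Usage sketch (illustrative): ip4_frag_ecn() maps a fragment's two ECN bits
 * to one of the IPFRAG_ECN_* flags above, and the queue ORs them together as
 * fragments arrive.  At reassembly time the accumulated value indexes
 * ip_frag_ecn_table; an entry of 0xff marks an invalid mix (e.g. ECT and
 * Not-ECT fragments in the same datagram) and the datagram is dropped.
 *
 *	u8 ecn = ip_frag_ecn_table[ipq->ecn];
 *
 *	if (ecn == 0xff)
 *		goto err;		(invalid combination, drop)
 */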

#endif