#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;    /* number of queues in this netns */
	struct list_head	lru_list;   /* LRU list of queues, oldest first */
	spinlock_t		lru_lock;   /* protects lru_list and nqueues */

	/* The percpu_counter "mem" needs to be cacheline aligned:
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
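/* Per-netns setup sketch (hedged: the thresholds and the ipv4.frags
 * placement follow the usual IPv4 pattern, not a requirement of this
 * API):
 *
 *	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *	net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *	inet_frags_init_net(&net->ipv4.frags);
 */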

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;       /* bytes of the datagram received so far */
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;   /* maximum received fragment size */

	struct netns_frags	*net;
};
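/* A queue is complete once both ends have been seen and the accounted
 * bytes cover the whole datagram. A sketch of the check the protocols
 * perform before reassembling (reassemble() is a hypothetical stand-in
 * for e.g. ip_frag_reasm()):
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		reassemble(q);
 */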

#define INETFRAGS_HASHSZ		64

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH		128
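/* Worked example, assuming a 4 MB default high_thresh and an averaged
 * per-entry cost of roughly 512 bytes:
 *	4194304 / 64 buckets / ~512 bytes  ~=  128 entries per bucket
 */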

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval; /* interval between hash rekeying */
	struct timer_list	secret_timer;
	u32			rnd;        /* random seed for the hash function */
	int			qsize;      /* size of the owner's queue struct */

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
						void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
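/* Registration sketch, modelled on how the IPv4 reassembler wires up
 * its ip4_frags instance (the ip4_* callbacks and ip_expire belong to
 * that protocol, not to this API):
 *
 *	ip4_frags.hashfn	= ip4_hashfn;
 *	ip4_frags.constructor	= ip4_frag_init;
 *	ip4_frags.destructor	= ip4_frag_free;
 *	ip4_frags.skb_free	= NULL;
 *	ip4_frags.qsize		= sizeof(struct ipq);
 *	ip4_frags.match		= ip4_frag_match;
 *	ip4_frags.frag_expire	= ip_expire;
 *	ip4_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&ip4_frags);
 */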

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

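/* Drops a reference, e.g. one handed out by inet_frag_find(); the last
 * put destroys the queue via inet_frag_destroy() and the owner's
 * destructor callback.
 */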
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;
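/* Where the 44-fragment figure comes from (a rough derivation assuming
 * a 1500-byte MTU): a maximal 64K datagram carries about 65515 bytes
 * of payload, split into 65515 / 1480 ~= 44 on-wire fragments, each
 * received into an skb with a truesize of about 2944 bytes.
 */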

static inline int frag_mem_limit(struct netns_frags *nf)
{
	/* Cheap read of the cached counter; may lag the true sum by up
	 * to frag_percpu_counter_batch per CPU.
	 */
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
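/* Usage sketch (following the pattern in the IPv4 reassembler): each
 * queued fragment is accounted with
 *	add_frag_mem_limit(q, skb->truesize);
 * and released with a matching sub_frag_mem_limit() call, so that
 * frag_mem_limit() can be compared against nf->high_thresh and
 * nf->low_thresh when deciding whether to evict.
 */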

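/* LRU maintenance: each netns keeps its queues on an LRU list, newest
 * at the tail; inet_frag_evictor() reclaims from the head, i.e. the
 * oldest queues, once the memory limits are exceeded.
 */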
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del(&q->lru_list);
	q->net->nqueues--;
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	q->net->nqueues++;
	spin_unlock(&nf->lru_lock);
}

/* RFC 3168 support:
 * We want to check the ECN values of all fragments to detect invalid
 * combinations. In ipq->ecn, we store the OR of each fragment's
 * ip4_frag_ecn() value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
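/* Lookup sketch, following the IPv4 usage: index the table with the
 * OR'ed flags and treat the reserved 0xff entry as an invalid mix
 * that should cause the datagram to be dropped:
 *
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto err;
 */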

#endif