/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree
 *    and the stack is just a path from the root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into a separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		at the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() in a situation where we passed the descriptor
 *		via such a socket and closed it (the descriptor). That would
 *		happen on each unix_gc() until the accept(). Since the struct
 *		file in question would go to the free list and might be
 *		reused... That might be the reason for random oopses on
 *		filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
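
/*
 * A minimal userspace sketch of how such a descriptor cycle arises
 * (send_fd() is a hypothetical helper wrapping sendmsg() with an
 * SCM_RIGHTS control message, not a real API):
 *
 *	int sv[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send_fd(sv[0], sv[1]);
 *	send_fd(sv[1], sv[0]);
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * After the two close() calls each socket is kept alive only by the
 * SCM_RIGHTS reference sitting in the other's receive queue, so
 * userspace can never release them; reclaiming such cycles is exactly
 * what unix_gc() below is for.
 */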

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;


static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 *	Socket ?
	 */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 *	PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 *	Keep track of the number of times a file descriptor is in
 *	flight, if it refers to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

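/* Undo unix_inflight(): drop one in-flight count for fp's socket. */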
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}

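/*
 * The head of sk_receive_queue doubles as the sentinel node of the skb
 * list; cast it so the queue walk below can compare iterators against it.
 */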
static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
	return (struct sk_buff *)&sk->sk_receive_queue;
}

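/*
 * Walk a receive queue: "next" is fetched before the body runs, so the
 * current skb may safely be unlinked while iterating.
 */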
#define receive_queue_for_each_skb(sk, next, skb) \
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)

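/*
 * Apply func to every GC-candidate unix socket referenced by SCM_RIGHTS
 * descriptors queued on x.  If hitlist is non-NULL, unlink each skb that
 * references at least one candidate and collect it on hitlist.
 */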
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (u->gc_candidate) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

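/*
 * Like scan_inflight(), but for a listening socket scan the receive
 * queues of its not-yet-accepted embryo connections instead.
 */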
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

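/* Per-socket callbacks passed to scan_children() by unix_gc() below. */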
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (u->gc_maybe_cycle)
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress = false;

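/* Block until any garbage collection currently in progress finishes. */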
void wait_for_unix_gc(void)
{
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
			u->gc_maybe_cycle = 1;
		}
	}

	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates that
	 * still have remaining references.  Do this recursively, so
	 * that in the end only sockets forming cyclic references
	 * remain.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			u->gc_maybe_cycle = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		u->gc_candidate = 0;
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}