/*
  FUSE: Filesystem in Userspace

  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

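/* Record the requesting task's fsuid, fsgid and pid in the request header */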
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

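/*
 * Allocate a request and fill in the caller's context.  May block
 * while the connection is congested (fc->blocked); only SIGKILL can
 * interrupt the wait.  Returns an ERR_PTR on failure.
 */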
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->blocked_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up(&fc->blocked_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

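/*
 * Drop a reference to the request.  On the final put the request is
 * either freed or, if it was taken from fuse_file->reserved_req,
 * handed back there.
 */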
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	dput(req->dentry);
	mntput(req->vfsmount);
	if (req->file)
		fput(req->file);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}

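/*
 * Sleep until the request is finished; bail out immediately if a
 * signal is already pending.  Drops fc->lock around the sleep.
 */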
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

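/*
 * Queue an INTERRUPT for a request already sent to userspace and
 * notify the reader.  Called with fc->lock held.
 */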
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (req->force) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	req->out.h.error = -EINTR;
	req->aborted = 1;

 aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

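/*
 * Assign the unique ID and total length, add the request to the
 * pending list and wake up the reader.  Called with fc->lock held.
 */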
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

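/*
 * Send a request and wait for the answer.  If the connection is broken
 * or refused, the error is returned in req->out.h.error.
 */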
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

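/*
 * Queue a request without waiting for the answer.  The request is
 * accounted as background I/O; once FUSE_MAX_BACKGROUND of them are in
 * flight, fc->blocked is set and new requests are throttled.
 */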
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

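/*
 * State for copying request data to/from the userspace buffer.  The
 * buffer is mapped and copied one page at a time (see fuse_copy_fill()).
 */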
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

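/* Is there a request or a queued interrupt waiting for the reader? */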
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
	__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->aborted)
		err = -ENOENT;
	if (err) {
		if (!req->aborted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

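/*
 * Verify the size of the reply and copy its arguments into the
 * request.  If out->argvar is set, the last argument may be shorter
 * than its declared size (variable length reply).
 */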
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

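/*
 * Poll support for the device: always writable, readable when a
 * request or interrupt is pending, POLLERR once the connection is gone.
 */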
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

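/*
 * The device file was closed: mark the connection as disconnected,
 * fail all queued requests and drop the reference to the connection.
 */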
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}