/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

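/* Reset a request to a pristine state: zeroed, unlinked, one reference */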
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

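/* Block all signals except SIGKILL, saving the old mask in oldset */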
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

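/* Record the caller's fsuid, fsgid and pid in the request header */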
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

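/*
 * Reserve and initialize a request for a synchronous operation.  A
 * minimal usage sketch (error handling trimmed):
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...fill in req->in...
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */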
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have it's own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

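/* Total length in bytes of an argument array */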
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

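/* Hand out the next unique request ID (zero is special and never used) */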
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

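/* Put a request on the pending list and wake up readers of the device */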
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

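/* Start queued background requests, keeping at most max_background active */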
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

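/*
 * State of a copy between a request and userspace, backed either by
 * an iovec or by pipe buffers
 */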
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

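/* Set up a copy state for an iovec based transfer */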
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap_atomic(cs->mapaddr, KM_USER0);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of the userspace buffer, map it into kernel
 * address space and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap_atomic(page, KM_USER0);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

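/*
 * Check that a page stolen from a pipe buffer is unreferenced and sane
 * enough to be inserted into the page cache
 */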
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

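/*
 * Try to steal the page of the current pipe buffer and install it in
 * place of *pagep (zero-copy splice).  Returns 0 on success, a negative
 * error, or 1 if the caller should fall back to an ordinary copy.
 */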
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	remove_from_page_cache(oldpage);
	page_cache_release(oldpage);

	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
	if (err) {
		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
		goto out_fallback_unlock;
	}
	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

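/*
 * Reference a request page from the next pipe buffer instead of copying
 * its contents (used on the splice read path)
 */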
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

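/* Is there a request or a queued interrupt waiting to be read? */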
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(&fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

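/* read() on the device: copy the next request to an iovec backed buffer */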
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

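/*
 * splice read on the device: fill freshly allocated pipe buffers with the
 * next request so page data can be passed on without copying
 */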
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->inode)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

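/*
 * Handle a FUSE_NOTIFY_POLL notification: copy the payload and wake up
 * the matching poll waiters
 */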
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, iov, nr_segs);

	return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

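/*
 * splice write to the device: collect the pipe buffers making up a reply
 * and feed them to fuse_dev_do_write(); with SPLICE_F_MOVE the pages may
 * be moved into the page cache instead of being copied
 */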
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}

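/*
 * poll() on the device: always writable while connected, readable when a
 * request or interrupt is pending
 */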
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
1571 1572
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
1573 1574 1575
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

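/* Enable or disable asynchronous (SIGIO) notification on the device file */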
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}