direct.c 25.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

C
Chuck Lever 已提交
58 59
#include "iostat.h"

L
Linus Torvalds 已提交
60 61
#define NFSDBG_FACILITY		NFSDBG_VFS

62
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);

/* slab cache for nfs_direct_req structures */
static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct list_head	list,		/* nfs_read/write_data structs */
				rewrite_list;	/* saved nfs_write_data structs */
	struct file *		filp;		/* file descriptor */
	struct kiocb *		iocb;		/* controlling i/o request */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct inode *		inode;		/* target file of i/o */
	unsigned long		user_addr;	/* location of user's buffer */
	size_t			user_count;	/* total bytes to move */
	loff_t			pos;		/* starting offset in file */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */

	/* completion state */
	spinlock_t		lock;		/* protect completion state */
	int			outstanding;	/* i/os we're waiting for */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */

	/* commit state */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);

101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	struct dentry *de = iocb->ki_filp->f_dentry;

	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			de->d_name.name, (long long) pos, nr_segs);

	return -EINVAL;
}

124
/*
 * Pin the pages backing the user buffer [user_addr, user_addr + size).
 * On success *pages holds a kmalloc'd array of pinned pages and the
 * page count is returned; on failure a negative errno is returned
 * (with *pages NULL after a short pin).
 */
static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
	unsigned long npages;
	size_t asize;
	int result;

	/* number of pages the buffer spans, accounting for its offset */
	npages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages -= user_addr >> PAGE_SHIFT;

	asize = npages * sizeof(struct page *);
	*pages = kmalloc(asize, GFP_KERNEL);
	if (*pages == NULL)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, user_addr,
				npages, (rw == READ), 0,
				*pages, NULL);
	up_read(&current->mm->mmap_sem);
	/*
	 * If we got fewer pages than expected from get_user_pages(),
	 * the user buffer runs off the end of a mapping; return EFAULT.
	 */
	if (result >= 0 && result < npages) {
		nfs_free_user_pages(*pages, result, 0);
		*pages = NULL;
		result = -EFAULT;
	}
	return result;
}

154
/*
 * Release pinned user pages (optionally marking them dirty first)
 * and free the page array itself.
 */
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int idx;

	for (idx = 0; idx < npages; idx++) {
		struct page *p = pages[idx];

		/* compound pages must not be dirtied here */
		if (do_dirty && !PageCompound(p))
			set_page_dirty_lock(p);
		page_cache_release(p);
	}
	kfree(pages);
}

166 167 168 169 170 171 172 173 174 175 176
/*
 * Allocate and initialize a fresh nfs_direct_req from the slab cache.
 * Returns NULL on allocation failure.
 */
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);

	if (dreq == NULL)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	spin_lock_init(&dreq->lock);
	dreq->iocb = NULL;
	dreq->outstanding = 0;
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

L
Linus Torvalds 已提交
188 189 190 191 192 193
/* kref callback: the final put returns the request to the slab cache */
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

194 195 196 197 198
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	/* sleep until every outstanding RPC has completed */
	result = wait_event_interruptible(dreq->wait, (dreq->outstanding == 0));

	/* a recorded error takes precedence over the byte count */
	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	/* drop the reference taken at allocation time */
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

217 218 219 220 221 222 223 224 225 226 227 228 229 230
/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	/* unpin (and dirty) the user pages now that all i/o is finished */
	nfs_free_user_pages(dreq->pages, dreq->npages, 1);

	if (dreq->iocb) {
		/* async: report any error, otherwise the bytes moved */
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	} else
		wake_up(&dreq->wait);

	/* drop the inode reference taken when the request was set up */
	iput(dreq->inode);
	kref_put(&dreq->kref, nfs_direct_req_release);
}

242
/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	/* pre-allocate one nfs_read_data per rsize-sized chunk */
	list = &dreq->list;
	for(;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			/* allocation failed: unwind everything done so far */
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	/* extra reference: dropped by nfs_direct_wait() */
	kref_get(&dreq->kref);
	return dreq;
}

T
Trond Myklebust 已提交
285
/* RPC completion callback for one direct READ chunk */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* nonzero means the RPC was restarted; don't touch dreq yet */
	if (nfs_readpage_result(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(task->tk_status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	/* only the last completing request finishes the dreq */
	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}

	spin_unlock(&dreq->lock);
	nfs_direct_complete(dreq);
}

T
Trond Myklebust 已提交
309 310 311 312 313
/* RPC callbacks for direct READ requests */
static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

314
/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
							file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t count = dreq->user_count;
	loff_t pos = dreq->pos;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = dreq->user_addr & ~PAGE_MASK;	/* offset into the first page */
	do {
		struct nfs_read_data *data;
		size_t bytes;

		/* each RPC moves at most rsize bytes */
		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		/* advance the page cursor past the bytes just dispatched */
		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

382
/* Set up a direct read request, dispatch it, and wait (sync) or queue (aio) */
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->user_addr = user_addr;
	dreq->user_count = count;
	dreq->pos = pos;
	dreq->pages = pages;
	dreq->npages = nr_pages;
	igrab(inode);		/* reference held until nfs_direct_complete() */
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;	/* only set for aio completion */

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	/* apply the RPC client's signal mask while RPCs are in flight */
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
/* Release every nfs_write_data still held by this request (both lists) */
static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	list_splice_init(&dreq->rewrite_list, &dreq->list);
	while (!list_empty(&dreq->list)) {
		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/* Requeue every saved write as a stable (FLUSH_STABLE) write */
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct list_head *pos;

	/* move the saved write_data structs back onto the dispatch list */
	list_splice_init(&dreq->rewrite_list, &dreq->list);
	list_for_each(pos, &dreq->list)
		dreq->outstanding++;
	dreq->count = 0;

	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
}

/* RPC completion callback for the COMMIT of unstable direct writes */
static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
	if (unlikely(task->tk_status < 0)) {
		dreq->error = task->tk_status;
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}
	if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		/* verifier changed (e.g. server reboot): resend everything */
		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
	nfs_direct_write_complete(dreq, data->inode);
}

/* RPC callbacks for the direct-write COMMIT */
static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_commit_release,
};

/* Send a single COMMIT covering the entire direct write request */
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct file *file = dreq->filp;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
							file->private_data;
	struct nfs_write_data *data = dreq->commit_data;
	struct rpc_task *task = &data->task;

	data->inode = dreq->inode;
	data->cred = ctx->cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = dreq->pos;
	data->args.count = dreq->user_count;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
				&nfs_commit_direct_ops, data);
	NFS_PROTO(data->inode)->commit_setup(data, 0);

	data->task.tk_priority = RPC_PRIORITY_NORMAL;
	data->task.tk_cookie = (unsigned long)data->inode;
	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", task->tk_pid);

	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
}

/* Decide the next step once the last WRITE (or the COMMIT) has finished */
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			/* unstable replies received: commit them now */
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			/* verifier mismatch or commit error: rewrite stably */
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_end_data_update(inode);
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_direct_complete(dreq);
	}
}

/* Pre-allocate the COMMIT write_data; on failure commit_data stays NULL */
static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commit_alloc(0);
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	/* NFSv2 has no COMMIT operation: writes are already stable */
	dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_end_data_update(inode);
	nfs_direct_free_writedata(dreq);
	nfs_direct_complete(dreq);
}
#endif

538
/* Allocate a dreq plus one nfs_write_data per wsize-sized chunk */
static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for(;;) {
		struct nfs_write_data *data = nfs_writedata_alloc(wpages);

		if (unlikely(!data)) {
			/* allocation failed: unwind everything done so far */
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_write_data, pages);
				list_del(&data->pages);
				nfs_writedata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}

	/* may fail: caller falls back to FLUSH_STABLE if commit_data is NULL */
	nfs_alloc_commit_data(dreq);

	/* extra reference: dropped by nfs_direct_wait() */
	kref_get(&dreq->kref);
	return dreq;
}

/* RPC completion callback for one direct WRITE chunk */
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	/* nonzero means the RPC was restarted; don't touch dreq yet */
	if (nfs_writeback_done(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (likely(status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
			case 0:
				/* first unstable reply: remember its verifier */
				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
				break;
			case NFS_ODIRECT_DO_COMMIT:
				/* verifier changed mid-request: force resend */
				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
				}
		}
	}
	/* In case we have to resend */
	data->args.stable = NFS_FILE_SYNC;

	spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	spin_lock(&dreq->lock);
	/* only the last release advances the request */
	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}
	spin_unlock(&dreq->lock);

	/* last write finished: commit, reschedule, or complete */
	nfs_direct_write_complete(dreq, data->inode);
}

/* RPC callbacks for direct WRITE requests */
static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};

/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 */
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
							file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t count = dreq->user_count;
	loff_t pos = dreq->pos;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = dreq->user_addr & ~PAGE_MASK;	/* offset into the first page */
	do {
		struct nfs_write_data *data;
		size_t bytes;

		/* each RPC moves at most wsize bytes */
		bytes = wsize;
		if (count < wsize)
			bytes = count;

		/* park the struct on rewrite_list in case a resend is needed */
		data = list_entry(list->next, struct nfs_write_data, pages);
		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, sync);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		/* advance the page cursor past the bytes just dispatched */
		pos += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}
L
Linus Torvalds 已提交
706

707
/* Set up a direct write request, dispatch it, and wait (sync) or queue (aio) */
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = 0;

	dreq = nfs_direct_write_alloc(count, wsize);
	if (!dreq)
		return -ENOMEM;
	/* no commit_data, or a single small write: write stable up front */
	if (dreq->commit_data == NULL || count < wsize)
		sync = FLUSH_STABLE;

	dreq->user_addr = user_addr;
	dreq->user_count = count;
	dreq->pos = pos;
	dreq->pages = pages;
	dreq->npages = nr_pages;
	igrab(inode);		/* reference held until nfs_direct_complete() */
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;	/* only set for aio completion */

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	/* apply the RPC client's signal mask while RPCs are in flight */
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_write_schedule(dreq, sync);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
750 751
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
L
Linus Torvalds 已提交
752 753 754 755 756 757
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
758
 * READ where the file size could change.  Our preference is simply
L
Linus Torvalds 已提交
759 760
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
761
 *
L
Linus Torvalds 已提交
762 763 764 765 766
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
767
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
L
Linus Torvalds 已提交
768 769
{
	ssize_t retval = -EINVAL;
770 771
	int page_count;
	struct page **pages;
L
Linus Torvalds 已提交
772 773 774
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

775
	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
776 777
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
778
		(unsigned long) count, (long long) pos);
L
Linus Torvalds 已提交
779 780 781 782

	if (count < 0)
		goto out;
	retval = -EFAULT;
783
	if (!access_ok(VERIFY_WRITE, buf, count))
L
Linus Torvalds 已提交
784 785 786 787 788
		goto out;
	retval = 0;
	if (!count)
		goto out;

T
Trond Myklebust 已提交
789 790 791
	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;
L
Linus Torvalds 已提交
792

793 794 795 796 797 798 799 800
	page_count = nfs_get_user_pages(READ, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

801
	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
802
						pages, page_count);
L
Linus Torvalds 已提交
803
	if (retval > 0)
804
		iocb->ki_pos = pos + retval;
L
Linus Torvalds 已提交
805 806 807 808 809 810 811 812 813

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	/* may clamp count and pos (e.g. rlimit, s_maxbytes) */
	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	/* flush any cached dirty data before going direct */
	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	/* pin the user buffer's pages in memory for the duration of the i/o */
	page_count = nfs_get_user_pages(WRITE, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
					pos, pages, page_count);

	/*
	 * XXX: nfs_end_data_update() already ensures this file's
	 *      cached data is subsequently invalidated.  Do we really
	 *      need to call invalidate_inode_pages2() again here?
	 *
	 *      For aio writes, this invalidation will almost certainly
	 *      occur before the writes complete.  Kind of racey.
	 */
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

896 897 898 899
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

912 913 914 915
/**
 * nfs_init_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
L
Linus Torvalds 已提交
916 917 918 919 920
void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}