/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
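
/*
 * Illustrative sketch (not part of this file, and not kernel code): the
 * application-side pattern described above.  The 4096-byte alignment is
 * only an assumption here; O_DIRECT alignment requirements vary by
 * kernel version and file system.
 *
 *	int fd = open("/mnt/nfs/file", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	pread(fd, buf, 4096, 0);	// data comes straight from the server
 *	pwrite(fd, buf, 4096, 0);	// returns after the data is on stable storage
 */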

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

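/*
 * Every outstanding I/O (and the submission path itself) holds a
 * reference on the dreq via io_count; the dreq completes when the
 * last reference is dropped.
 */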
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
#ifndef CONFIG_NFS_SWAP
	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp, (long long) pos, nr_segs);

	return -EINVAL;
#else
	VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);

	if (rw == READ || rw == KERNEL_READ)
		return nfs_file_direct_read(iocb, iov, nr_segs, pos,
				rw == READ ? true : false);
	return nfs_file_direct_write(iocb, iov, nr_segs, pos,
				rw == WRITE ? true : false);
#endif /* CONFIG_NFS_SWAP */
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
{
	struct inode *inode = dreq->inode;

	if (dreq->iocb) {
		loff_t pos = dreq->iocb->ki_pos + dreq->count;
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;

		if (write) {
			spin_lock(&inode->i_lock);
			if (i_size_read(inode) < pos)
				i_size_write(inode, pos);
			spin_unlock(&inode->i_lock);
		}

		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

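/*
 * Per-header READ completion: account the bytes that arrived, dirty
 * the pages that now hold valid data, and drop the request references.
 * The final put_dreq() completes the whole direct read.
 */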
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes)
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos, bool uio)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		if (uio) {
			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < 0)
				break;
		} else {
			WARN_ON(npages != 1);
			result = get_kernel_page(user_addr, 1, pagevec);
			if (WARN_ON(result != 1))
				break;
		}

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
			dreq->bytes_left -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

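/*
 * Walk the iovec and feed each segment to the pageio descriptor,
 * stopping at the first error or short segment.  The extra dreq
 * reference taken here pairs with the put_dreq() below and keeps the
 * request alive until every READ has been dispatched.
 */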
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos, bool uio)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = iov_length(iov, nr_segs);
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	NFS_I(inode)->read_io += iov_length(iov, nr_segs);
	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void nfs_inode_dio_write_done(struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	inode_dio_done(inode);
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
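/*
 * Resend every write still sitting on the commit lists, typically
 * because a commit failed or the server's write verifier changed.
 * Requests that cannot be re-queued fail the whole request with -EIO.
 */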
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	NFS_PROTO(dreq->inode)->write_pageio_init(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

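/*
 * COMMIT completion: a failed commit, or a reply carrying a write
 * verifier that differs from the one saved at WRITE time, forces the
 * unstable data to be rescheduled and rewritten through the MDS.
 */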
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_inode_dio_write_done(dreq->inode);
			nfs_direct_complete(dreq, true);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_inode_dio_write_done(inode);
	nfs_direct_complete(dreq, true);
}
#endif

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_completion().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos, bool uio)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		if (uio) {
			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
						npages, 0, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < 0)
				break;
		} else {
			WARN_ON(npages != 1);
			result = get_kernel_page(user_addr, 0, pagevec);
			if (WARN_ON(result != 1))
				break;
		}

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
			dreq->bytes_left -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

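/*
 * Per-header WRITE completion: account errors and good bytes, capture
 * the server's write verifier the first time unstable data is seen,
 * and move pages that still need a COMMIT onto the commit list.
 */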
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, hdr->verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

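/*
 * Walk the iovec and feed each segment to the pageio descriptor as
 * above.  i_dio_count is raised for the duration of the direct write
 * and is dropped again in nfs_inode_dio_write_done() or on error.
 */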
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos, bool uio)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count, bool uio)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = count;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}