/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
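
/*
 * Illustrative user-space sketch (an assumption about typical usage,
 * not part of the client implementation): an application requests
 * uncached I/O simply by opening the file with O_DIRECT, after which
 * its reads and writes translate directly into NFS READ and WRITE
 * calls:
 *
 *	int fd = open("/mnt/nfs/data.db", O_RDWR | O_DIRECT);
 *	ssize_t n = pread(fd, buf, len, offset);
 *
 * Since the client does not correct unaligned requests (see above),
 * buf, len, and offset should satisfy whatever alignment the
 * application's storage stack expects.
 */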

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

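/*
 * Each outstanding request holds a reference on the dreq; the final
 * put_dreq() (the one that returns true) is what allows completion
 * processing to run.
 */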
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
#ifndef CONFIG_NFS_SWAP
	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp, (long long) pos, nr_segs);

	return -EINVAL;
#else
	VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);

	if (rw == READ || rw == KERNEL_READ)
		return nfs_file_direct_read(iocb, iov, nr_segs, pos,
				rw == READ ? true : false);
	return nfs_file_direct_write(iocb, iov, nr_segs, pos,
				rw == WRITE ? true : false);
#endif /* CONFIG_NFS_SWAP */
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

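/*
 * Point a commit-info structure at this dreq so that the generic
 * commit machinery records its bookkeeping here rather than in the
 * inode.
 */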
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
{
	struct inode *inode = dreq->inode;

	if (dreq->iocb && write) {
		loff_t pos = dreq->iocb->ki_pos + dreq->count;

		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < pos)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	if (write)
		nfs_zap_mapping(inode, inode->i_mapping);

	inode_dio_done(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}

	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

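/*
 * Per-header read completion: record the error or tally the good
 * bytes under dreq->lock, dirty the pages that received data, and
 * drop the request and page references.
 */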
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes)
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos, bool uio)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		if (uio) {
			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < 0)
				break;
		} else {
			WARN_ON(npages != 1);
			result = get_kernel_page(user_addr, 1, pagevec);
			if (WARN_ON(result != 1))
				break;
		}

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
			dreq->bytes_left -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

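/*
 * Walk the iovec, scheduling a READ for each segment.  The extra
 * get_dreq() here balances the final put_dreq() below, and the
 * i_dio_count reference keeps operations such as truncate from
 * running while direct I/O is in flight.
 */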
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos, bool uio)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	atomic_inc(&inode->i_dio_count);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	return 0;
}

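/*
 * Common read entry point: set up the dreq (open and lock contexts,
 * plus the iocb for async callers), schedule the reads, and wait for
 * completion only if the iocb is synchronous.
 */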
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = iov_length(iov, nr_segs);
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	NFS_I(inode)->read_io += iov_length(iov, nr_segs);
	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	NFS_PROTO(dreq->inode)->write_pageio_init(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

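/*
 * COMMIT completion: if the commit failed, or the write verifier
 * changed (the server likely rebooted after the unstable WRITEs),
 * arrange for the writes to be resent; otherwise the data is stable
 * and the requests can be released.
 */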
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

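/*
 * Gather the requests that still need committing and send the
 * COMMITs; on failure (-ENOMEM) fall back to resending the writes.
 */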
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

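/*
 * Deferred stage of write completion, run from a workqueue: depending
 * on how the WRITEs finished, either send a COMMIT, resend the
 * writes, or declare the whole dreq done.
 */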
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_direct_complete(dreq, true);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_direct_complete(dreq, true);
}
#endif

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos, bool uio)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		if (uio) {
			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
						npages, 0, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < 0)
				break;
		} else {
			WARN_ON(npages != 1);
			result = get_kernel_page(user_addr, 0, pagevec);
			if (WARN_ON(result != 1))
				break;
		}

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
			dreq->bytes_left -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

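/*
 * Per-header write completion: classify the outcome under dreq->lock
 * (error, needs commit, or needs reschedule), remember the write
 * verifier for the later COMMIT, and queue requests on the commit
 * list where required.
 */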
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, hdr->verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

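/*
 * Write-side counterpart of nfs_direct_read_schedule_iovec(): walk the
 * iovec and schedule a WRITE for each segment.  FLUSH_COND_STABLE lets
 * writes that fit in a single RPC go out stable and skip the COMMIT.
 */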
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos, bool uio)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

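/*
 * Common write entry point, mirroring nfs_direct_read(): set up the
 * dreq, schedule the writes, and wait only for synchronous iocbs.
 */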
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count, bool uio)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = count;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 * @uio: true if iov addresses user-space buffers; false for kernel pages (swap-over-NFS)
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 * @uio: true if iov addresses user-space buffers; false for kernel pages (swap-over-NFS)
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}