/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
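
/*
 * For illustration: an application requests uncached I/O simply by
 * opening the file with O_DIRECT; every read and write on that
 * descriptor then goes straight to the server:
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	ssize_t n = write(fd, buf, len);
 *
 * (The path is hypothetical; no NFS-specific setup is needed beyond
 * the O_DIRECT open flag.)
 */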

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

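/*
 * dreq->io_count holds one reference for each I/O request still
 * outstanding against the server.  put_dreq() returns non-zero when
 * the final reference drops, which is the point at which the direct
 * request as a whole can be completed.
 */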
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct I/O, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
#ifndef CONFIG_NFS_SWAP
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
#else
	VM_BUG_ON(iocb->ki_left != PAGE_SIZE);
	VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);

	if (rw == READ || rw == KERNEL_READ)
		return nfs_file_direct_read(iocb, iov, nr_segs, pos,
				rw == READ ? true : false);
	return nfs_file_direct_write(iocb, iov, nr_segs, pos,
				rw == WRITE ? true : false);
#endif /* CONFIG_NFS_SWAP */
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

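/*
 * Point a commit info structure at the commit state embedded in a
 * direct request, so the generic commit machinery works against the
 * dreq's own lists and lock.
 */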
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
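	/* one reference for the caller, one for the I/O completion path */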
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

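	/*
	 * Hand the pages back to the application: on a short read,
	 * zero the part of each page that the server did not fill so
	 * user space never sees stale data, then release the request.
	 */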
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		if (!PageCompound(page)) {
			if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
				if (bytes < hdr->good_bytes)
					set_page_dirty(page);
			} else
				set_page_dirty(page);
		}
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos, bool uio)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		if (uio) {
			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < 0)
				break;
		} else {
			WARN_ON(npages != 1);
			result = get_kernel_page(user_addr, 1, pagevec);
			if (WARN_ON(result != 1))
				break;
		}

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos, bool uio)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	NFS_I(inode)->read_io += iov_length(iov, nr_segs);
	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void nfs_inode_dio_write_done(struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	inode_dio_done(inode);
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
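/*
 * Resend writes whose data may not have reached stable storage: pull
 * every request off the commit lists and send each one again, this
 * time as a stable (FLUSH_STABLE) write.
 */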
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	NFS_PROTO(dreq->inode)->write_pageio_init(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

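/*
 * A COMMIT reply has arrived.  If the commit failed, or if the write
 * verifier no longer matches the one returned for the original
 * writes, the data may not be on disk, so flag the writes for
 * rescheduling.
 */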
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

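/*
 * Deferred completion work, run once every write RPC has finished:
 * issue a COMMIT if an unstable reply was received, resend the
 * writes if commit verification failed, otherwise finish the
 * direct request.
 */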
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_inode_dio_write_done(dreq->inode);
			nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_inode_dio_write_done(inode);
	nfs_direct_complete(dreq);
}
#endif

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos, bool uio)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		if (uio) {
			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
						npages, 0, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < 0)
				break;
		} else {
			WARN_ON(npages != 1);
			result = get_kernel_page(user_addr, 0, pagevec);
			if (WARN_ON(result != 1))
				break;
		}

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

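/*
 * A WRITE RPC has completed: record the error or the bytes written,
 * then decide for each page whether it still needs a COMMIT (the
 * server replied unstable) or a resend (the verifier changed), and
 * queue it accordingly.
 */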
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, hdr->verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos, bool uio)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count, bool uio)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
894
 * READ where the file size could change.  Our preference is simply
L
Linus Torvalds 已提交
895 896
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos, bool uio)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
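		/*
		 * Direct writes bypass generic_file_aio_write(), so
		 * i_size must be grown here if the write extended the
		 * file.
		 */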
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}