/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
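
/*
 * For illustration, a minimal user-space sketch of how an application
 * requests uncached I/O on an NFS file.  The path and the 4096-byte
 * alignment below are only example values; the point is that opening
 * with O_DIRECT routes the read and write through nfs_file_direct_read()
 * and nfs_file_direct_write() below instead of the page cache.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
 *			return 1;
 *		memset(buf, 0, 4096);
 *		if (pwrite(fd, buf, 4096, 0) < 0)
 *			return 1;
 *		if (pread(fd, buf, 4096, 0) < 0)
 *			return 1;
 *		free(buf);
 *		return close(fd);
 *	}
 */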

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_commit_data *commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
{
	unsigned int npages;
	unsigned int i;

	if (count == 0)
		return;
	pages += (pgbase >> PAGE_SHIFT);
	npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (!PageCompound(page))
			set_page_dirty(page);
	}
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	dreq->l_ctx = NULL;
	spin_lock_init(&dreq->lock);
	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	nfs_readpage_result(task, data);
}

static void nfs_direct_read_release(void *calldata)
{

	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *)data->header->req;
	int status = data->task.tk_status;

	spin_lock(&dreq->lock);
	if (unlikely(status < 0)) {
		dreq->error = status;
		spin_unlock(&dreq->lock);
	} else {
		dreq->count += data->res.count;
		spin_unlock(&dreq->lock);
		nfs_direct_dirty_pages(data->pagevec,
				data->args.pgbase,
				data->res.count);
	}
	nfs_direct_release_pages(data->pagevec, data->npages);

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	nfs_readdata_release(data);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_prepare = nfs_read_prepare,
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_direct_read_release,
};

static void nfs_direct_readhdr_release(struct nfs_read_header *rhdr)
{
	struct nfs_read_data *data = &rhdr->rpc_data;

	if (data->pagevec != data->page_array)
		kfree(data->pagevec);
	nfs_readhdr_free(&rhdr->header);
}

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readhdr_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_release().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_read_direct_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	do {
		struct nfs_read_header *rhdr;
		struct nfs_read_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(rsize,count);

		result = -ENOMEM;
		rhdr = nfs_readhdr_alloc(nfs_page_array_len(pgbase, bytes));
		if (unlikely(!rhdr))
			break;
		data = &rhdr->rpc_data;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 1, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0) {
			nfs_direct_readhdr_release(rhdr);
			break;
		}
		if ((unsigned)result < data->npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(data->pagevec, result);
				nfs_direct_readhdr_release(rhdr);
				break;
			}
			bytes -= pgbase;
			data->npages = result;
		}

		get_dreq(dreq);

		rhdr->header.req = (struct nfs_page *) dreq;
		rhdr->header.inode = inode;
		rhdr->header.cred = msg.rpc_cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = get_nfs_open_context(ctx);
		data->args.lock_context = dreq->l_ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;
		nfs_fattr_init(&data->fattr);
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;

		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		NFS_PROTO(inode)->read_setup(data, &msg);

		task = rpc_run_task(&task_setup_data);
		if (IS_ERR(task))
			break;

		dprintk("NFS: %5u initiated direct read call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				task->tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);
		rpc_put_task(task);

		started += bytes;
		user_addr += bytes;
		pos += bytes;
		/* FIXME: Remove this unnecessary math from final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	get_dreq(dreq);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void nfs_direct_writehdr_release(struct nfs_write_header *whdr)
{
	struct nfs_write_data *data = &whdr->rpc_data;

	if (data->pagevec != data->page_array)
		kfree(data->pagevec);
	nfs_writehdr_free(&whdr->header);
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	while (!list_empty(&dreq->rewrite_list)) {
		struct nfs_pgio_header *hdr = list_entry(dreq->rewrite_list.next, struct nfs_pgio_header, pages);
		struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
		list_del(&hdr->pages);
		nfs_direct_release_pages(whdr->rpc_data.pagevec, whdr->rpc_data.npages);
		nfs_direct_writehdr_release(whdr);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;
	struct list_head *p;
	struct nfs_write_data *data;
	struct nfs_pgio_header *hdr;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = dreq->ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_write_direct_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	dreq->count = 0;
	get_dreq(dreq);

	list_for_each(p, &dreq->rewrite_list) {
		hdr = list_entry(p, struct nfs_pgio_header, pages);
		data = &(container_of(hdr, struct nfs_write_header, header))->rpc_data;

		get_dreq(dreq);

		/* Use stable writes */
		data->args.stable = NFS_FILE_SYNC;

		/*
		 * Reset data->res.
		 */
		nfs_fattr_init(&data->fattr);
		data->res.count = data->args.count;
		memset(&data->verf, 0, sizeof(data->verf));

		/*
		 * Reuse data->task; data->args should not have changed
		 * since the original request was sent.
		 */
		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;
		NFS_PROTO(inode)->write_setup(data, &msg);

		/*
		 * We're called via an RPC callback, so BKL is already held.
		 */
		task = rpc_run_task(&task_setup_data);
		if (!IS_ERR(task))
			rpc_put_task(task);

		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				data->args.count,
				(unsigned long long)data->args.offset);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

static void nfs_direct_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;
	struct nfs_direct_req *dreq = data->dreq;
	int status = data->task.tk_status;

	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
				data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	nfs_direct_write_complete(dreq, data->inode);
	nfs_commit_free(data);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_direct_commit_release,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct nfs_commit_data *data = dreq->commit_data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = dreq->ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(dreq->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_commit_direct_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	data->inode = dreq->inode;
	data->cred = msg.rpc_cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = 0;
	data->args.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_zap_mapping(inode, inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commitdata_alloc();
	if (dreq->commit_data != NULL)
		dreq->commit_data->dreq = dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_direct_free_writedata(dreq);
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
}
#endif

static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;

	nfs_writeback_done(task, data);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_pgio_header *hdr = data->header;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) hdr->req;
	int status = data->task.tk_status;

	spin_lock(&dreq->lock);

	if (unlikely(status < 0)) {
		/* An error has occurred, so we should not commit */
		dreq->flags = 0;
		dreq->error = status;
	}
	if (unlikely(dreq->error != 0))
		goto out_unlock;

	dreq->count += data->res.count;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
			case 0:
				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
				break;
			case NFS_ODIRECT_DO_COMMIT:
				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
					dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
				}
		}
	}
out_unlock:
	spin_unlock(&dreq->lock);

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_prepare = nfs_write_prepare,
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writehdr_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_release().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
						 const struct iovec *iov,
						 loff_t pos, int sync)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_write_direct_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	do {
		struct nfs_write_header *whdr;
		struct nfs_write_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(wsize,count);

		result = -ENOMEM;
		whdr = nfs_writehdr_alloc(nfs_page_array_len(pgbase, bytes));
		if (unlikely(!whdr))
			break;

		data = &whdr->rpc_data;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 0, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0) {
			nfs_direct_writehdr_release(whdr);
			break;
		}
		if ((unsigned)result < data->npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(data->pagevec, result);
				nfs_direct_writehdr_release(whdr);
				break;
			}
			bytes -= pgbase;
			data->npages = result;
		}

		get_dreq(dreq);

		list_move_tail(&whdr->header.pages, &dreq->rewrite_list);

		whdr->header.req = (struct nfs_page *) dreq;
		whdr->header.inode = inode;
		whdr->header.cred = msg.rpc_cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.lock_context = dreq->l_ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->args.stable = sync;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;
		nfs_fattr_init(&data->fattr);

		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;
		NFS_PROTO(inode)->write_setup(data, &msg);

		task = rpc_run_task(&task_setup_data);
		if (IS_ERR(task))
			break;

		dprintk("NFS: %5u initiated direct write call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				task->tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);
		rpc_put_task(task);

		started += bytes;
		user_addr += bytes;
		pos += bytes;

		/* FIXME: Remove this useless math from the final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos, int sync)
{
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	get_dreq(dreq);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(dreq, vec,
							   pos, sync);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = NFS_UNSTABLE;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;
	nfs_alloc_commit_data(dreq);

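	/*
	 * If no commit data could be allocated, or the entire request fits
	 * in a single wsize'd WRITE, use stable (NFS_FILE_SYNC) writes so
	 * that no separate COMMIT is needed; otherwise issue UNSTABLE
	 * writes and commit them after they all complete.
	 */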
	if (dreq->commit_data == NULL || count <= wsize)
		sync = NFS_FILE_SYNC;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);

L
1025
	if (retval > 0)
1026
		iocb->ki_pos = pos + retval;
L
1027 1028 1029 1030 1031

out:
	return retval;
}

1032 1033 1034 1035
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
D
1036
int __init nfs_init_directcache(void)
L
1037 1038 1039
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
1040 1041
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
1042
						NULL);
L
1043 1044 1045 1046 1047 1048
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

1049
/**
D
1050
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1051 1052
 *
 */
1053
void nfs_destroy_directcache(void)
L
1054
{
1055
	kmem_cache_destroy(nfs_direct_cachep);
L
1056
}