write.c 40.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 * linux/fs/nfs/write.c
 *
4
 * Write file data over NFS.
L
Linus Torvalds 已提交
5 6 7 8 9 10 11 12 13 14
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
15
#include <linux/swap.h>
L
Linus Torvalds 已提交
16 17 18 19 20

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
21 22
#include <linux/backing-dev.h>

L
Linus Torvalds 已提交
23 24 25
#include <asm/uaccess.h>

#include "delegation.h"
26
#include "internal.h"
C
Chuck Lever 已提交
27
#include "iostat.h"
L
Linus Torvalds 已提交
28 29 30 31 32 33 34 35 36 37 38 39

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
40 41
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
42
static void nfs_redirty_request(struct nfs_page *req);
43 44 45
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;
L
Linus Torvalds 已提交
46

47
static struct kmem_cache *nfs_wdata_cachep;
48
static mempool_t *nfs_wdata_mempool;
L
Linus Torvalds 已提交
49 50
static mempool_t *nfs_commit_mempool;

51
struct nfs_write_data *nfs_commitdata_alloc(void)
L
Linus Torvalds 已提交
52
{
C
Christoph Lameter 已提交
53
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
54

L
Linus Torvalds 已提交
55 56 57 58 59 60 61
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

62
void nfs_commit_free(struct nfs_write_data *p)
L
Linus Torvalds 已提交
63
{
64 65
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
L
Linus Torvalds 已提交
66 67 68
	mempool_free(p, nfs_commit_mempool);
}

69
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
70
{
C
Christoph Lameter 已提交
71
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
72 73 74 75

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
76
		p->npages = pagecount;
77 78
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
79
		else {
80 81
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
82 83 84 85 86 87 88 89
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

90
static void nfs_writedata_free(struct nfs_write_data *p)
91 92 93 94 95 96
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

97
void nfs_writedata_release(void *data)
L
Linus Torvalds 已提交
98
{
99 100 101
	struct nfs_write_data *wdata = data;

	put_nfs_open_context(wdata->args.context);
L
Linus Torvalds 已提交
102 103 104
	nfs_writedata_free(wdata);
}

105 106 107 108 109 110 111
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

112 113 114 115 116 117 118
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
119
			kref_get(&req->wb_kref);
120 121 122 123 124 125
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
126
	struct inode *inode = page->mapping->host;
127 128
	struct nfs_page *req = NULL;

129
	spin_lock(&inode->i_lock);
130
	req = nfs_page_find_request_locked(page);
131
	spin_unlock(&inode->i_lock);
132 133 134
	return req;
}

L
Linus Torvalds 已提交
135 136 137 138 139
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
140
	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
L
Linus Torvalds 已提交
141 142 143 144 145 146

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
C
Chuck Lever 已提交
147
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
L
Linus Torvalds 已提交
148 149 150
	i_size_write(inode, end);
}

151 152 153 154 155 156 157
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}

L
Linus Torvalds 已提交
158 159 160 161 162 163 164 165 166
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
167
	if (count != nfs_page_length(page))
L
Linus Torvalds 已提交
168
		return;
169
	SetPageUptodate(page);
L
Linus Torvalds 已提交
170 171
}

172
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
L
Linus Torvalds 已提交
173 174 175
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
176
	int ret;
L
Linus Torvalds 已提交
177

178 179 180 181 182 183 184 185 186 187 188
	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
L
Linus Torvalds 已提交
189 190
	/* Update file length */
	nfs_grow_file(page, offset, count);
191
	nfs_clear_page_tag_locked(req);
192
	return 0;
L
Linus Torvalds 已提交
193 194 195 196 197
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
198
		return FLUSH_HIGHPRI | FLUSH_STABLE;
L
Linus Torvalds 已提交
199 200 201 202 203
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

204 205 206 207 208 209 210 211 212 213
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

214
static int nfs_set_page_writeback(struct page *page)
215
{
216 217 218
	int ret = test_set_page_writeback(page);

	if (!ret) {
219 220 221
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

222
		if (atomic_long_inc_return(&nfss->writeback) >
223 224 225
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
226
	return ret;
227 228 229 230 231 232 233 234
}

static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
P
Peter Zijlstra 已提交
235
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
236 237 238
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
}

239 240
/*
 * Find an associated nfs write request, and prepare to flush it out
T
Trond Myklebust 已提交
241
 * May return an error if the user signalled nfs_wait_on_request().
242
 */
243 244
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
245
{
246
	struct inode *inode = page->mapping->host;
247 248 249
	struct nfs_page *req;
	int ret;

250
	spin_lock(&inode->i_lock);
251 252 253
	for(;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
254
			spin_unlock(&inode->i_lock);
T
Trond Myklebust 已提交
255
			return 0;
256
		}
257
		if (nfs_set_page_tag_locked(req))
258 259
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
260
		 *	 then the call to nfs_set_page_tag_locked() will always
261 262 263
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
264
		spin_unlock(&inode->i_lock);
265 266 267 268
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
269
		spin_lock(&inode->i_lock);
270
	}
271 272
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
273
		spin_unlock(&inode->i_lock);
274
		nfs_clear_page_tag_locked(req);
275
		nfs_pageio_complete(pgio);
T
Trond Myklebust 已提交
276
		return 0;
277
	}
278
	if (nfs_set_page_writeback(page) != 0) {
279
		spin_unlock(&inode->i_lock);
280 281
		BUG();
	}
282
	spin_unlock(&inode->i_lock);
283 284 285 286
	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		return pgio->pg_error;
	}
T
Trond Myklebust 已提交
287
	return 0;
288 289
}

T
Trond Myklebust 已提交
290
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
L
Linus Torvalds 已提交
291 292 293
{
	struct inode *inode = page->mapping->host;

C
Chuck Lever 已提交
294 295 296
	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

297
	nfs_pageio_cond_complete(pgio, page->index);
T
Trond Myklebust 已提交
298 299
	return nfs_page_async_flush(pgio, page);
}
300

T
Trond Myklebust 已提交
301 302 303 304 305 306 307
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;
308

T
Trond Myklebust 已提交
309 310 311 312 313 314 315 316
	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
317 318 319 320
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
T
Trond Myklebust 已提交
321
	int ret;
322

T
Trond Myklebust 已提交
323
	ret = nfs_writepage_locked(page, wbc);
L
Linus Torvalds 已提交
324
	unlock_page(page);
T
Trond Myklebust 已提交
325 326 327 328 329 330 331 332 333 334
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
L
Linus Torvalds 已提交
335 336 337 338 339
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
340
	struct nfs_pageio_descriptor pgio;
L
Linus Torvalds 已提交
341 342
	int err;

C
Chuck Lever 已提交
343 344
	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

345
	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
T
Trond Myklebust 已提交
346
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
347
	nfs_pageio_complete(&pgio);
T
Trond Myklebust 已提交
348
	if (err < 0)
L
Linus Torvalds 已提交
349
		return err;
T
Trond Myklebust 已提交
350
	if (pgio.pg_error < 0)
351 352
		return pgio.pg_error;
	return 0;
L
Linus Torvalds 已提交
353 354 355 356 357
}

/*
 * Insert a write request into an inode
 */
358
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
L
Linus Torvalds 已提交
359 360 361 362 363
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
364
	BUG_ON(error);
L
Linus Torvalds 已提交
365 366 367 368 369
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
370
	SetPagePrivate(req->wb_page);
371
	set_page_private(req->wb_page, (unsigned long)req);
L
Linus Torvalds 已提交
372
	nfsi->npages++;
373
	kref_get(&req->wb_kref);
374 375
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
L
Linus Torvalds 已提交
376 377 378
}

/*
379
 * Remove a write request from an inode
L
Linus Torvalds 已提交
380 381 382
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
383
	struct inode *inode = req->wb_context->path.dentry->d_inode;
L
Linus Torvalds 已提交
384 385 386 387
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

388
	spin_lock(&inode->i_lock);
389
	set_page_private(req->wb_page, 0);
390
	ClearPagePrivate(req->wb_page);
L
Linus Torvalds 已提交
391 392 393
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
394
		spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
395 396
		iput(inode);
	} else
397
		spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
398 399 400 401
	nfs_clear_request(req);
	nfs_release_request(req);
}

402
static void
F
Fred 已提交
403
nfs_mark_request_dirty(struct nfs_page *req)
404 405 406 407
{
	__set_page_dirty_nobuffers(req->wb_page);
}

L
Linus Torvalds 已提交
408 409 410 411 412 413
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
414 415
	struct page *page = req->wb_page;

416
	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
417
		return 0;
F
Fred Isaman 已提交
418
	return !PageWriteback(page);
L
Linus Torvalds 已提交
419 420 421 422 423 424 425 426 427
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
428
	struct inode *inode = req->wb_context->path.dentry->d_inode;
L
Linus Torvalds 已提交
429 430
	struct nfs_inode *nfsi = NFS_I(inode);

431
	spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
432
	nfsi->ncommit++;
433
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
434 435 436
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index,
			NFS_PAGE_TAG_COMMIT);
437
	spin_unlock(&inode->i_lock);
438
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
439
	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
440
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
L
Linus Torvalds 已提交
441
}
442 443 444 445 446 447 448 449 450 451

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
452
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
453 454 455 456
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
F
Fred 已提交
457
		nfs_mark_request_dirty(req);
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478
		return 1;
	}
	return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
L
Linus Torvalds 已提交
479 480 481 482 483
#endif

/*
 * Wait for a request to complete.
 *
484
 * Interruptible by fatal signals only.
L
Linus Torvalds 已提交
485
 */
486
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
L
Linus Torvalds 已提交
487 488 489
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
490
	pgoff_t idx_end, next;
L
Linus Torvalds 已提交
491 492 493 494 495 496 497 498 499
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
500
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
L
Linus Torvalds 已提交
501 502 503 504
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
505
		BUG_ON(!NFS_WBACK_BUSY(req));
L
Linus Torvalds 已提交
506

507
		kref_get(&req->wb_kref);
508
		spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
509 510
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
511
		spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
512 513 514 515 516 517 518
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

T
Trond Myklebust 已提交
519 520 521 522 523 524
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
525
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
526 527
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
T
Trond Myklebust 已提交
528
		nfs_list_remove_request(req);
529
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
T
Trond Myklebust 已提交
530
		nfs_inode_remove_request(req);
531
		nfs_unlock_request(req);
T
Trond Myklebust 已提交
532 533 534
	}
}

L
Linus Torvalds 已提交
535 536 537 538 539 540 541 542 543 544 545 546
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
547
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
L
Linus Torvalds 已提交
548 549
{
	struct nfs_inode *nfsi = NFS_I(inode);
550 551 552
	int res = 0;

	if (nfsi->ncommit != 0) {
553 554
		res = nfs_scan_list(nfsi, dst, idx_start, npages,
				NFS_PAGE_TAG_COMMIT);
555 556
		nfsi->ncommit -= res;
	}
L
Linus Torvalds 已提交
557 558
	return res;
}
559
#else
560
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
561 562 563
{
	return 0;
}
L
Linus Torvalds 已提交
564 565 566 567 568 569 570 571 572 573
#endif

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
574
		struct page *page, unsigned int offset, unsigned int bytes)
L
Linus Torvalds 已提交
575
{
576 577
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
L
Linus Torvalds 已提交
578
	struct nfs_page		*req, *new = NULL;
579
	pgoff_t		rqend, end;
L
Linus Torvalds 已提交
580 581 582 583 584 585 586

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
587 588 589 590 591 592 593
		if (new) {
			if (radix_tree_preload(GFP_NOFS)) {
				nfs_release_request(new);
				return ERR_PTR(-ENOMEM);
			}
		}

594
		spin_lock(&inode->i_lock);
595
		req = nfs_page_find_request_locked(page);
L
Linus Torvalds 已提交
596
		if (req) {
597
			if (!nfs_set_page_tag_locked(req)) {
L
Linus Torvalds 已提交
598
				int error;
599

600
				spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
601 602
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
N
Neil Brown 已提交
603
				if (error < 0) {
604 605
					if (new) {
						radix_tree_preload_end();
N
Neil Brown 已提交
606
						nfs_release_request(new);
607
					}
L
Linus Torvalds 已提交
608
					return ERR_PTR(error);
N
Neil Brown 已提交
609
				}
L
Linus Torvalds 已提交
610 611
				continue;
			}
612
			spin_unlock(&inode->i_lock);
613 614
			if (new) {
				radix_tree_preload_end();
L
Linus Torvalds 已提交
615
				nfs_release_request(new);
616
			}
L
Linus Torvalds 已提交
617 618 619 620 621
			break;
		}

		if (new) {
			nfs_lock_request_dontget(new);
622
			nfs_inode_add_request(inode, new);
623
			spin_unlock(&inode->i_lock);
624
			radix_tree_preload_end();
T
Trond Myklebust 已提交
625 626
			req = new;
			goto zero_page;
L
Linus Torvalds 已提交
627
		}
628
		spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
646
		nfs_clear_page_tag_locked(req);
L
Linus Torvalds 已提交
647 648 649 650 651 652 653
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
T
Trond Myklebust 已提交
654 655
		req->wb_bytes = max(end, rqend) - req->wb_offset;
		goto zero_page;
L
Linus Torvalds 已提交
656 657 658 659 660 661
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
T
Trond Myklebust 已提交
662 663 664 665 666
zero_page:
	/* If this page might potentially be marked as up to date,
	 * then we need to zero any uninitalised data. */
	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
			&& !PageUptodate(req->wb_page))
667
		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
T
Trond Myklebust 已提交
668
	return req;
L
Linus Torvalds 已提交
669 670 671 672
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
673
	struct nfs_open_context *ctx = nfs_file_open_context(file);
L
Linus Torvalds 已提交
674
	struct nfs_page	*req;
T
Trond Myklebust 已提交
675
	int do_flush, status;
L
Linus Torvalds 已提交
676 677 678 679 680 681 682 683
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
T
Trond Myklebust 已提交
684 685 686 687 688
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
689
			|| !nfs_dirty_request(req);
L
Linus Torvalds 已提交
690
		nfs_release_request(req);
T
Trond Myklebust 已提交
691 692 693 694 695
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
L
Linus Torvalds 已提交
696 697
}

698 699 700 701 702 703 704 705 706 707 708
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	return PageUptodate(page) &&
		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}

L
Linus Torvalds 已提交
709 710 711 712 713 714 715 716 717
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
718
	struct nfs_open_context *ctx = nfs_file_open_context(file);
L
Linus Torvalds 已提交
719 720 721
	struct inode	*inode = page->mapping->host;
	int		status = 0;

C
Chuck Lever 已提交
722 723
	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

L
Linus Torvalds 已提交
724
	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
725 726
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
727
		(long long)(page_offset(page) +offset));
L
Linus Torvalds 已提交
728 729

	/* If we're not using byte range locks, and we know the page
730 731 732
	 * is up to date, it may be more efficient to extend the write
	 * to cover the entire page in order to avoid fragmentation
	 * inefficiencies.
L
Linus Torvalds 已提交
733
	 */
734 735
	if (nfs_write_pageuptodate(page, inode) &&
			inode->i_flock == NULL &&
736
			!(file->f_flags & O_SYNC)) {
737
		count = max(count + offset, nfs_page_length(page));
L
Linus Torvalds 已提交
738 739 740
		offset = 0;
	}

741
	status = nfs_writepage_setup(ctx, page, offset, count);
742
	__set_page_dirty_nobuffers(page);
L
Linus Torvalds 已提交
743 744 745 746

        dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
747
		nfs_set_pageerror(page);
L
Linus Torvalds 已提交
748 749 750 751 752 753
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{

754 755 756 757 758 759
	if (PageError(req->wb_page)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else if (!nfs_reschedule_unstable_write(req)) {
		/* Set the PG_uptodate flag */
		nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
760 761 762 763
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
764
	nfs_clear_page_tag_locked(req);
L
Linus Torvalds 已提交
765 766
}

767
static int flush_task_priority(int how)
L
Linus Torvalds 已提交
768 769 770 771 772 773 774 775 776 777 778 779 780
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
781
static int nfs_write_rpcsetup(struct nfs_page *req,
L
Linus Torvalds 已提交
782
		struct nfs_write_data *data,
783
		const struct rpc_call_ops *call_ops,
L
Linus Torvalds 已提交
784 785 786
		unsigned int count, unsigned int offset,
		int how)
{
787 788
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
789
	int priority = flush_task_priority(how);
790
	struct rpc_task *task;
791 792 793 794 795
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
796 797
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
798
		.task = &data->task,
799
		.rpc_message = &msg,
800 801
		.callback_ops = call_ops,
		.callback_data = data,
802
		.workqueue = nfsiod_workqueue,
803
		.flags = flags,
804
		.priority = priority,
805
	};
L
Linus Torvalds 已提交
806 807 808 809 810

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
811
	data->inode = inode = req->wb_context->path.dentry->d_inode;
812
	data->cred = msg.rpc_cred;
L
Linus Torvalds 已提交
813 814 815 816 817 818

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
819
	data->args.context = get_nfs_open_context(req->wb_context);
820 821 822 823 824 825
	data->args.stable  = NFS_UNSTABLE;
	if (how & FLUSH_STABLE) {
		data->args.stable = NFS_DATA_SYNC;
		if (!NFS_I(inode)->ncommit)
			data->args.stable = NFS_FILE_SYNC;
	}
L
Linus Torvalds 已提交
826 827 828 829

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
830
	nfs_fattr_init(&data->fattr);
L
Linus Torvalds 已提交
831

832
	/* Set up the initial task struct.  */
833
	NFS_PROTO(inode)->write_setup(data, &msg);
L
Linus Torvalds 已提交
834

C
Chuck Lever 已提交
835 836
	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
837
		data->task.tk_pid,
L
Linus Torvalds 已提交
838 839 840 841 842
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);

843
	task = rpc_run_task(&task_setup_data);
844 845 846 847
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
L
Linus Torvalds 已提交
848 849
}

F
Fred 已提交
850 851 852 853 854 855 856 857 858 859 860
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}

L
Linus Torvalds 已提交
861 862 863 864
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
865
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
L
Linus Torvalds 已提交
866 867 868 869
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
870 871
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
L
Linus Torvalds 已提交
872
	int requests = 0;
873
	int ret = 0;
L
Linus Torvalds 已提交
874 875 876 877
	LIST_HEAD(list);

	nfs_list_remove_request(req);

878
	nbytes = count;
879 880 881
	do {
		size_t len = min(nbytes, wsize);

882
		data = nfs_writedata_alloc(1);
L
Linus Torvalds 已提交
883 884 885 886
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
887 888
		nbytes -= len;
	} while (nbytes != 0);
L
Linus Torvalds 已提交
889 890 891 892
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
893
	nbytes = count;
L
Linus Torvalds 已提交
894
	do {
895 896
		int ret2;

L
Linus Torvalds 已提交
897 898 899 900 901
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

902 903
		if (nbytes < wsize)
			wsize = nbytes;
904
		ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
905
				   wsize, offset, how);
906 907
		if (ret == 0)
			ret = ret2;
908 909
		offset += wsize;
		nbytes -= wsize;
L
Linus Torvalds 已提交
910 911
	} while (nbytes != 0);

912
	return ret;
L
Linus Torvalds 已提交
913 914 915 916 917

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
918
		nfs_writedata_release(data);
L
Linus Torvalds 已提交
919
	}
920
	nfs_redirty_request(req);
L
Linus Torvalds 已提交
921 922 923 924 925 926 927 928 929 930 931
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
932
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
L
Linus Torvalds 已提交
933 934 935 936 937
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;

938
	data = nfs_writedata_alloc(npages);
L
Linus Torvalds 已提交
939 940 941 942 943 944 945 946 947 948 949 950 951 952
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
953
	return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
L
Linus Torvalds 已提交
954 955
 out_bad:
	while (!list_empty(head)) {
956
		req = nfs_list_entry(head->next);
L
Linus Torvalds 已提交
957
		nfs_list_remove_request(req);
958
		nfs_redirty_request(req);
L
Linus Torvalds 已提交
959 960 961 962
	}
	return -ENOMEM;
}

963 964
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
L
Linus Torvalds 已提交
965
{
966
	size_t wsize = NFS_SERVER(inode)->wsize;
L
Linus Torvalds 已提交
967

968
	if (wsize < PAGE_CACHE_SIZE)
969
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
970
	else
971
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
L
Linus Torvalds 已提交
972 973 974 975 976
}

/*
 * Handle a write reply that flushed part of a page.
 */
977
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
L
Linus Torvalds 已提交
978
{
979
	struct nfs_write_data	*data = calldata;
L
Linus Torvalds 已提交
980 981 982
	struct nfs_page		*req = data->req;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
983 984
		req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
L
Linus Torvalds 已提交
985 986 987
		req->wb_bytes,
		(long long)req_offset(req));

988 989
	nfs_writeback_done(task, data);
}
990

991 992 993 994 995 996 997 998
static void nfs_writeback_release_partial(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0) {
999
		nfs_set_pageerror(page);
1000 1001
		nfs_context_set_write_error(req->wb_context, status);
		dprintk(", error = %d\n", status);
1002
		goto out;
L
Linus Torvalds 已提交
1003 1004
	}

1005
	if (nfs_write_need_commit(data)) {
1006
		struct inode *inode = page->mapping->host;
1007

1008
		spin_lock(&inode->i_lock);
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
1019
		spin_unlock(&inode->i_lock);
1020 1021 1022 1023
	} else
		dprintk(" OK\n");

out:
L
Linus Torvalds 已提交
1024 1025
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
1026
	nfs_writedata_release(calldata);
L
Linus Torvalds 已提交
1027 1028
}

1029 1030
static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
1031
	.rpc_release = nfs_writeback_release_partial,
1032 1033
};

L
Linus Torvalds 已提交
1034 1035 1036 1037 1038 1039 1040
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
1041
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
L
Linus Torvalds 已提交
1042
{
1043
	struct nfs_write_data	*data = calldata;
L
Linus Torvalds 已提交
1044

1045 1046 1047 1048 1049 1050 1051
	nfs_writeback_done(task, data);
}

static void nfs_writeback_release_full(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	int status = data->task.tk_status;
1052

L
Linus Torvalds 已提交
1053 1054
	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
1055 1056 1057
		struct nfs_page *req = nfs_list_entry(data->pages.next);
		struct page *page = req->wb_page;

L
Linus Torvalds 已提交
1058 1059 1060
		nfs_list_remove_request(req);

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
1061 1062
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
L
Linus Torvalds 已提交
1063 1064 1065
			req->wb_bytes,
			(long long)req_offset(req));

1066
		if (status < 0) {
1067
			nfs_set_pageerror(page);
1068 1069
			nfs_context_set_write_error(req->wb_context, status);
			dprintk(", error = %d\n", status);
1070
			goto remove_request;
L
Linus Torvalds 已提交
1071 1072
		}

1073 1074 1075 1076 1077
		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
L
Linus Torvalds 已提交
1078 1079
			goto next;
		}
1080 1081
		/* Set the PG_uptodate flag? */
		nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
1082 1083 1084
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
L
Linus Torvalds 已提交
1085 1086
		nfs_inode_remove_request(req);
	next:
1087
		nfs_clear_page_tag_locked(req);
L
Linus Torvalds 已提交
1088
	}
1089
	nfs_writedata_release(calldata);
L
Linus Torvalds 已提交
1090 1091
}

1092 1093
static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
1094
	.rpc_release = nfs_writeback_release_full,
1095 1096 1097
};


L
Linus Torvalds 已提交
1098 1099 1100
/*
 * This function is called when the WRITE call is complete.
 */
1101
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
L
Linus Torvalds 已提交
1102 1103 1104
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
1105
	int status;
L
Linus Torvalds 已提交
1106

C
Chuck Lever 已提交
1107
	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
L
Linus Torvalds 已提交
1108 1109
		task->tk_pid, task->tk_status);

1110 1111 1112 1113 1114 1115 1116
	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
1117 1118 1119
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
C
Chuck Lever 已提交
1120 1121
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

L
Linus Torvalds 已提交
1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long    complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
1137
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
L
Linus Torvalds 已提交
1138 1139 1140 1141 1142 1143 1144 1145 1146
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long    complain;

C
Chuck Lever 已提交
1147 1148
		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

L
Linus Torvalds 已提交
1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163
		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
1164
			return -EAGAIN;
L
Linus Torvalds 已提交
1165 1166 1167 1168 1169 1170 1171 1172 1173 1174
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
1175
	return 0;
L
Linus Torvalds 已提交
1176 1177 1178 1179
}


#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1180
void nfs_commitdata_release(void *data)
L
Linus Torvalds 已提交
1181
{
1182 1183 1184
	struct nfs_write_data *wdata = data;

	put_nfs_open_context(wdata->args.context);
L
Linus Torvalds 已提交
1185 1186 1187 1188 1189 1190
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
1191
static int nfs_commit_rpcsetup(struct list_head *head,
1192 1193
		struct nfs_write_data *data,
		int how)
L
Linus Torvalds 已提交
1194
{
1195 1196 1197
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1198
	int priority = flush_task_priority(how);
1199
	struct rpc_task *task;
1200 1201 1202 1203 1204
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = first->wb_context->cred,
	};
1205
	struct rpc_task_setup task_setup_data = {
1206
		.task = &data->task,
1207
		.rpc_client = NFS_CLIENT(inode),
1208
		.rpc_message = &msg,
1209 1210
		.callback_ops = &nfs_commit_ops,
		.callback_data = data,
1211
		.workqueue = nfsiod_workqueue,
1212
		.flags = flags,
1213
		.priority = priority,
1214
	};
L
Linus Torvalds 已提交
1215 1216 1217 1218 1219 1220 1221

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
1222
	data->cred	  = msg.rpc_cred;
L
Linus Torvalds 已提交
1223 1224

	data->args.fh     = NFS_FH(data->inode);
1225 1226 1227
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
1228
	data->args.context = get_nfs_open_context(first->wb_context);
1229
	data->res.count   = 0;
L
Linus Torvalds 已提交
1230 1231
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
1232
	nfs_fattr_init(&data->fattr);
1233 1234

	/* Set up the initial task struct.  */
1235
	NFS_PROTO(inode)->commit_setup(data, &msg);
L
Linus Torvalds 已提交
1236

C
Chuck Lever 已提交
1237
	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1238

1239
	task = rpc_run_task(&task_setup_data);
1240 1241 1242 1243
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
L
Linus Torvalds 已提交
1244 1245 1246 1247 1248 1249
}

/*
 * Commit dirty pages
 */
static int
1250
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
L
Linus Torvalds 已提交
1251 1252 1253 1254
{
	struct nfs_write_data	*data;
	struct nfs_page         *req;

1255
	data = nfs_commitdata_alloc();
L
Linus Torvalds 已提交
1256 1257 1258 1259 1260

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
1261
	return nfs_commit_rpcsetup(head, data, how);
L
Linus Torvalds 已提交
1262 1263 1264 1265 1266
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
T
Trond Myklebust 已提交
1267
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1268 1269
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
1270
		nfs_clear_page_tag_locked(req);
L
Linus Torvalds 已提交
1271 1272 1273 1274 1275 1276 1277
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
1278
static void nfs_commit_done(struct rpc_task *task, void *calldata)
L
Linus Torvalds 已提交
1279
{
1280
	struct nfs_write_data	*data = calldata;
L
Linus Torvalds 已提交
1281

C
Chuck Lever 已提交
1282
        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
L
Linus Torvalds 已提交
1283 1284
                                task->tk_pid, task->tk_status);

1285 1286 1287
	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
1288 1289 1290 1291 1292 1293 1294
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	int status = data->task.tk_status;
1295

L
Linus Torvalds 已提交
1296 1297 1298
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
1299
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
1300
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1301 1302
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
L
Linus Torvalds 已提交
1303 1304

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1305 1306
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
L
Linus Torvalds 已提交
1307 1308
			req->wb_bytes,
			(long long)req_offset(req));
1309 1310
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
L
Linus Torvalds 已提交
1311
			nfs_inode_remove_request(req);
1312
			dprintk(", error = %d\n", status);
L
Linus Torvalds 已提交
1313 1314 1315 1316 1317 1318 1319
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
1320 1321 1322
			/* Set the PG_uptodate flag */
			nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
					req->wb_bytes);
L
Linus Torvalds 已提交
1323 1324 1325 1326 1327 1328
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
F
Fred 已提交
1329
		nfs_mark_request_dirty(req);
L
Linus Torvalds 已提交
1330
	next:
1331
		nfs_clear_page_tag_locked(req);
L
Linus Torvalds 已提交
1332
	}
1333
	nfs_commitdata_release(calldata);
L
Linus Torvalds 已提交
1334
}
1335 1336 1337 1338 1339

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
L
Linus Torvalds 已提交
1340

1341
int nfs_commit_inode(struct inode *inode, int how)
L
Linus Torvalds 已提交
1342 1343
{
	LIST_HEAD(head);
T
Trond Myklebust 已提交
1344
	int res;
L
Linus Torvalds 已提交
1345

1346
	spin_lock(&inode->i_lock);
1347
	res = nfs_scan_commit(inode, &head, 0, 0);
1348
	spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
1349
	if (res) {
T
Trond Myklebust 已提交
1350
		int error = nfs_commit_list(inode, &head, how);
1351 1352 1353
		if (error < 0)
			return error;
	}
L
Linus Torvalds 已提交
1354 1355
	return res;
}
1356 1357 1358 1359 1360
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
L
Linus Torvalds 已提交
1361 1362
#endif

1363
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
L
Linus Torvalds 已提交
1364
{
1365
	struct inode *inode = mapping->host;
1366
	pgoff_t idx_start, idx_end;
1367
	unsigned int npages = 0;
1368
	LIST_HEAD(head);
1369
	int nocommit = how & FLUSH_NOCOMMIT;
T
Trond Myklebust 已提交
1370
	long pages, ret;
L
Linus Torvalds 已提交
1371

1372 1373 1374 1375 1376 1377 1378
	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
1379
			pgoff_t l_npages = 1 + idx_end - idx_start;
1380 1381
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
1382
					(pgoff_t)npages != l_npages)
1383 1384 1385
				npages = 0;
		}
	}
1386
	how &= ~FLUSH_NOCOMMIT;
1387
	spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
1388
	do {
1389 1390
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
1391
			continue;
1392 1393
		if (nocommit)
			break;
1394
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
1395
		if (pages == 0)
1396
			break;
1397
		if (how & FLUSH_INVALIDATE) {
1398
			spin_unlock(&inode->i_lock);
T
Trond Myklebust 已提交
1399
			nfs_cancel_commit_list(&head);
1400
			ret = pages;
1401
			spin_lock(&inode->i_lock);
1402 1403 1404
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
1405
		spin_unlock(&inode->i_lock);
1406
		ret = nfs_commit_list(inode, &head, how);
1407 1408
		spin_lock(&inode->i_lock);

1409
	} while (ret >= 0);
1410
	spin_unlock(&inode->i_lock);
1411
	return ret;
L
Linus Torvalds 已提交
1412 1413
}

T
Trond Myklebust 已提交
1414
static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
1415 1416 1417
{
	int ret;

T
Trond Myklebust 已提交
1418
	ret = nfs_writepages(mapping, wbc);
1419 1420
	if (ret < 0)
		goto out;
T
Trond Myklebust 已提交
1421
	ret = nfs_sync_mapping_wait(mapping, wbc, how);
1422 1423 1424
	if (ret < 0)
		goto out;
	return 0;
1425
out:
1426
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1427 1428 1429
	return ret;
}

T
Trond Myklebust 已提交
1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448
/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = __nfs_write_mapping(mapping, &wbc, how);
	if (ret < 0)
		return ret;
	wbc.sync_mode = WB_SYNC_ALL;
	return __nfs_write_mapping(mapping, &wbc, how);
}

1449 1450 1451 1452
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
1453
{
1454 1455
	return nfs_write_mapping(inode->i_mapping, 0);
}
1456

1457 1458 1459
int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
1460 1461
}

1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

1506 1507
static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
1508 1509 1510
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1511 1512 1513 1514 1515 1516 1517 1518
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;
1519

1520 1521 1522 1523 1524 1525 1526 1527
	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
1528
		if (ret < 0)
1529 1530 1531 1532
			goto out_error;
	} while (PagePrivate(page));
	return 0;
out_error:
1533
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
1534
	return ret;
1535 1536 1537 1538 1539 1540 1541
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
1542
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
1543 1544
}

D
David Howells 已提交
1545
int __init nfs_init_writepagecache(void)
L
Linus Torvalds 已提交
1546 1547 1548 1549
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
1550
					     NULL);
L
Linus Torvalds 已提交
1551 1552 1553
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

1554 1555
	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
L
Linus Torvalds 已提交
1556 1557 1558
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

1559 1560
	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
L
Linus Torvalds 已提交
1561 1562 1563
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583
	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

L
Linus Torvalds 已提交
1584 1585 1586
	return 0;
}

1587
void nfs_destroy_writepagecache(void)
L
Linus Torvalds 已提交
1588 1589 1590
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
1591
	kmem_cache_destroy(nfs_wdata_cachep);
L
Linus Torvalds 已提交
1592 1593
}