write.c 44.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 * linux/fs/nfs/write.c
 *
4
 * Write file data over NFS.
L
Linus Torvalds 已提交
5 6 7 8 9 10 11 12 13 14
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
15
#include <linux/swap.h>
16
#include <linux/migrate.h>
L
Linus Torvalds 已提交
17 18 19 20 21

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
22
#include <linux/backing-dev.h>
23
#include <linux/export.h>
24

L
Linus Torvalds 已提交
25 26 27
#include <asm/uaccess.h>

#include "delegation.h"
28
#include "internal.h"
C
Chuck Lever 已提交
29
#include "iostat.h"
30
#include "nfs4_fs.h"
31
#include "fscache.h"
32
#include "pnfs.h"
L
Linus Torvalds 已提交
33

34 35
#include "nfstrace.h"

L
Linus Torvalds 已提交
36 37 38 39 40 41 42 43
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
44
static void nfs_redirty_request(struct nfs_page *req);
45
static const struct rpc_call_ops nfs_commit_ops;
46
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
47
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48
static const struct nfs_rw_ops nfs_rw_write_ops;
L
Linus Torvalds 已提交
49

50
static struct kmem_cache *nfs_wdata_cachep;
51
static mempool_t *nfs_wdata_mempool;
52
static struct kmem_cache *nfs_cdata_cachep;
L
Linus Torvalds 已提交
53 54
static mempool_t *nfs_commit_mempool;

55
struct nfs_commit_data *nfs_commitdata_alloc(void)
L
Linus Torvalds 已提交
56
{
57
	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
58

L
Linus Torvalds 已提交
59 60 61 62 63 64
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
65
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
L
Linus Torvalds 已提交
66

67
void nfs_commit_free(struct nfs_commit_data *p)
L
Linus Torvalds 已提交
68 69 70
{
	mempool_free(p, nfs_commit_mempool);
}
71
EXPORT_SYMBOL_GPL(nfs_commit_free);
L
Linus Torvalds 已提交
72

73
static struct nfs_rw_header *nfs_writehdr_alloc(void)
74
{
75
	struct nfs_rw_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
76

77
	if (p)
78 79 80 81
		memset(p, 0, sizeof(*p));
	return p;
}

82
static void nfs_writehdr_free(struct nfs_rw_header *whdr)
83
{
84
	mempool_free(whdr, nfs_wdata_mempool);
85 86
}

87 88 89 90 91 92 93
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

94 95
static struct nfs_page *
nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
96 97 98
{
	struct nfs_page *req = NULL;

99
	if (PagePrivate(page))
100
		req = (struct nfs_page *)page_private(page);
101 102 103 104 105 106 107 108 109 110
	else if (unlikely(PageSwapCache(page))) {
		struct nfs_page *freq, *t;

		/* Linearly search the commit list for the correct req */
		list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
			if (freq->wb_page == page) {
				req = freq;
				break;
			}
		}
111
	}
112 113 114 115

	if (req)
		kref_get(&req->wb_kref);

116 117 118 119 120
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
121
	struct inode *inode = page_file_mapping(page)->host;
122 123
	struct nfs_page *req = NULL;

124
	spin_lock(&inode->i_lock);
125
	req = nfs_page_find_request_locked(NFS_I(inode), page);
126
	spin_unlock(&inode->i_lock);
127 128 129
	return req;
}

L
Linus Torvalds 已提交
130 131 132
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
133
	struct inode *inode = page_file_mapping(page)->host;
134 135
	loff_t end, i_size;
	pgoff_t end_index;
L
Linus Torvalds 已提交
136

137 138 139
	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
140
	if (i_size > 0 && page_file_index(page) < end_index)
141
		goto out;
142
	end = page_file_offset(page) + ((loff_t)offset+count);
L
Linus Torvalds 已提交
143
	if (i_size >= end)
144
		goto out;
L
Linus Torvalds 已提交
145
	i_size_write(inode, end);
146 147 148
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
149 150
}

151 152 153
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
154
	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
155 156
}

L
Linus Torvalds 已提交
157 158 159 160 161 162 163 164 165
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
166
	if (count != nfs_page_length(page))
L
Linus Torvalds 已提交
167
		return;
168
	SetPageUptodate(page);
L
Linus Torvalds 已提交
169 170 171 172 173
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
174
		return FLUSH_HIGHPRI | FLUSH_STABLE;
175
	if (wbc->for_kupdate || wbc->for_background)
176 177
		return FLUSH_LOWPRI | FLUSH_COND_STABLE;
	return FLUSH_COND_STABLE;
L
Linus Torvalds 已提交
178 179
}

180 181 182 183 184 185 186 187 188 189
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

190
static void nfs_set_page_writeback(struct page *page)
191
{
192
	struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
193 194
	int ret = test_set_page_writeback(page);

195
	WARN_ON_ONCE(ret != 0);
196

197 198 199 200
	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH) {
		set_bdi_congested(&nfss->backing_dev_info,
					BLK_RW_ASYNC);
201 202 203 204 205
	}
}

static void nfs_end_page_writeback(struct page *page)
{
206
	struct inode *inode = page_file_mapping(page)->host;
207 208 209
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
P
Peter Zijlstra 已提交
210
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
211
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
212 213
}

214
static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
215
{
216
	struct inode *inode = page_file_mapping(page)->host;
217 218 219
	struct nfs_page *req;
	int ret;

220
	spin_lock(&inode->i_lock);
221
	for (;;) {
222
		req = nfs_page_find_request_locked(NFS_I(inode), page);
223 224
		if (req == NULL)
			break;
225
		if (nfs_lock_request(req))
226 227
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
228
		 *	 then the call to nfs_lock_request() will always
229 230 231
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
232
		spin_unlock(&inode->i_lock);
233 234 235 236
		if (!nonblock)
			ret = nfs_wait_on_request(req);
		else
			ret = -EAGAIN;
237 238
		nfs_release_request(req);
		if (ret != 0)
239
			return ERR_PTR(ret);
240
		spin_lock(&inode->i_lock);
241
	}
242
	spin_unlock(&inode->i_lock);
243 244 245 246 247 248 249 250
	return req;
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
251
				struct page *page, bool nonblock)
252 253 254 255
{
	struct nfs_page *req;
	int ret = 0;

256
	req = nfs_find_and_lock_request(page, nonblock);
257 258 259 260 261 262
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

263 264
	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
265

266
	ret = 0;
267 268
	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
269
		ret = pgio->pg_error;
270
	}
271 272
out:
	return ret;
273 274
}

T
Trond Myklebust 已提交
275
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
L
Linus Torvalds 已提交
276
{
277
	struct inode *inode = page_file_mapping(page)->host;
278
	int ret;
L
Linus Torvalds 已提交
279

C
Chuck Lever 已提交
280 281 282
	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

283
	nfs_pageio_cond_complete(pgio, page_file_index(page));
284
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
285 286 287 288 289
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
T
Trond Myklebust 已提交
290
}
291

T
Trond Myklebust 已提交
292 293 294 295 296 297 298
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;
299

300 301
	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
				false, &nfs_async_write_completion_ops);
T
Trond Myklebust 已提交
302 303 304 305 306 307 308
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
309 310 311 312
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
T
Trond Myklebust 已提交
313
	int ret;
314

T
Trond Myklebust 已提交
315
	ret = nfs_writepage_locked(page, wbc);
L
Linus Torvalds 已提交
316
	unlock_page(page);
T
Trond Myklebust 已提交
317 318 319 320 321 322 323 324 325 326
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
L
Linus Torvalds 已提交
327 328 329 330 331
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
332
	unsigned long *bitlock = &NFS_I(inode)->flags;
333
	struct nfs_pageio_descriptor pgio;
L
Linus Torvalds 已提交
334 335
	int err;

336 337 338 339 340 341
	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

C
Chuck Lever 已提交
342 343
	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

344 345
	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
T
Trond Myklebust 已提交
346
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
347
	nfs_pageio_complete(&pgio);
348 349 350 351 352

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

T
Trond Myklebust 已提交
353
	if (err < 0)
354 355 356 357
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
358
	return 0;
359 360
out_err:
	return err;
L
Linus Torvalds 已提交
361 362 363 364 365
}

/*
 * Insert a write request into an inode
 */
F
Fred Isaman 已提交
366
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
L
Linus Torvalds 已提交
367 368
{
	struct nfs_inode *nfsi = NFS_I(inode);
369 370

	/* Lock the request! */
371
	nfs_lock_request(req);
372 373

	spin_lock(&inode->i_lock);
374
	if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
375
		inode->i_version++;
376 377 378 379 380 381 382 383 384
	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
L
Linus Torvalds 已提交
385
	nfsi->npages++;
386
	kref_get(&req->wb_kref);
387
	spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
388 389 390
}

/*
391
 * Remove a write request from an inode
L
Linus Torvalds 已提交
392 393 394
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
395
	struct inode *inode = req->wb_context->dentry->d_inode;
L
Linus Torvalds 已提交
396 397
	struct nfs_inode *nfsi = NFS_I(inode);

398
	spin_lock(&inode->i_lock);
399 400 401 402 403
	if (likely(!PageSwapCache(req->wb_page))) {
		set_page_private(req->wb_page, 0);
		ClearPagePrivate(req->wb_page);
		clear_bit(PG_MAPPED, &req->wb_flags);
	}
L
Linus Torvalds 已提交
404
	nfsi->npages--;
405
	spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
406 407 408
	nfs_release_request(req);
}

409
static void
F
Fred 已提交
410
nfs_mark_request_dirty(struct nfs_page *req)
411 412 413 414
{
	__set_page_dirty_nobuffers(req->wb_page);
}

B
Bryan Schumaker 已提交
415
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
416 417 418
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
F
Fred Isaman 已提交
419 420
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
421
 *
F
Fred Isaman 已提交
422
 * This sets the PG_CLEAN bit, updates the cinfo count of
423 424 425
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
F
Fred Isaman 已提交
426
 * The caller must _not_ hold the cinfo->lock, but must be
427
 * holding the nfs_page lock.
L
Linus Torvalds 已提交
428
 */
429
void
F
Fred Isaman 已提交
430 431
nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
			    struct nfs_commit_info *cinfo)
L
Linus Torvalds 已提交
432
{
433
	set_bit(PG_CLEAN, &(req)->wb_flags);
F
Fred Isaman 已提交
434 435 436 437
	spin_lock(cinfo->lock);
	nfs_list_add_request(req, dst);
	cinfo->mds->ncommit++;
	spin_unlock(cinfo->lock);
438 439
	if (!cinfo->dreq) {
		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
440
		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
441 442 443 444
			     BDI_RECLAIMABLE);
		__mark_inode_dirty(req->wb_context->dentry->d_inode,
				   I_DIRTY_DATASYNC);
	}
L
Linus Torvalds 已提交
445
}
446 447 448 449 450
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
F
Fred Isaman 已提交
451
 * @cinfo: holds list lock and accounting info
452
 *
F
Fred Isaman 已提交
453
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
454 455 456
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
F
Fred Isaman 已提交
457
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
458 459
 */
void
F
Fred Isaman 已提交
460 461
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
462 463 464 465
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
F
Fred Isaman 已提交
466
	cinfo->mds->ncommit--;
467 468 469
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

F
Fred Isaman 已提交
470 471 472 473 474 475
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->lock = &inode->i_lock;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
F
Fred Isaman 已提交
476
	cinfo->dreq = NULL;
477
	cinfo->completion_ops = &nfs_commit_completion_ops;
F
Fred Isaman 已提交
478 479 480 481 482 483
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
484 485 486 487
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
F
Fred Isaman 已提交
488 489
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);
490 491 492 493

/*
 * Add a request to the inode's commit list.
 */
494
void
F
Fred Isaman 已提交
495 496
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo)
497
{
F
Fred Isaman 已提交
498
	if (pnfs_mark_request_commit(req, lseg, cinfo))
499
		return;
F
Fred Isaman 已提交
500
	nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
501
}
502

F
Fred Isaman 已提交
503 504 505 506
static void
nfs_clear_page_commit(struct page *page)
{
	dec_zone_page_state(page, NR_UNSTABLE_NFS);
507
	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
F
Fred Isaman 已提交
508 509
}

510
static void
511 512
nfs_clear_request_commit(struct nfs_page *req)
{
513 514
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = req->wb_context->dentry->d_inode;
F
Fred Isaman 已提交
515
		struct nfs_commit_info cinfo;
516

F
Fred Isaman 已提交
517 518 519 520 521
		nfs_init_cinfo_from_inode(&cinfo, inode);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			spin_lock(cinfo.lock);
			nfs_request_remove_commit_list(req, &cinfo);
			spin_unlock(cinfo.lock);
522
		}
F
Fred Isaman 已提交
523
		nfs_clear_page_commit(req->wb_page);
524 525 526
	}
}

527
static inline
528
int nfs_write_need_commit(struct nfs_pgio_data *data)
529
{
530
	if (data->verf.committed == NFS_DATA_SYNC)
531 532
		return data->header->lseg == NULL;
	return data->verf.committed != NFS_FILE_SYNC;
533 534 535
}

#else
536 537 538 539 540 541 542 543 544 545 546
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
}

547
void
F
Fred Isaman 已提交
548 549
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo)
550 551 552
{
}

553
static void
554 555 556 557
nfs_clear_request_commit(struct nfs_page *req)
{
}

558
static inline
559
int nfs_write_need_commit(struct nfs_pgio_data *data)
560 561 562 563
{
	return 0;
}

564 565
#endif

566
static void nfs_write_completion(struct nfs_pgio_header *hdr)
567
{
F
Fred Isaman 已提交
568
	struct nfs_commit_info cinfo;
569 570 571 572
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
F
Fred Isaman 已提交
573
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
574 575 576 577 578 579 580
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
581
			nfs_set_pageerror(req->wb_page);
582 583 584 585 586 587 588 589
			nfs_context_set_write_error(req->wb_context, hdr->error);
			goto remove_req;
		}
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			nfs_mark_request_dirty(req);
			goto next;
		}
		if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
590
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
F
Fred Isaman 已提交
591
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
592 593 594 595 596
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
597
		nfs_unlock_request(req);
598
		nfs_end_page_writeback(req->wb_page);
599
		nfs_release_request(req);
600 601 602
	}
out:
	hdr->release(hdr);
603
}
L
Linus Torvalds 已提交
604

B
Bryan Schumaker 已提交
605
#if  IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
606
unsigned long
F
Fred Isaman 已提交
607
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
608
{
F
Fred Isaman 已提交
609
	return cinfo->mds->ncommit;
F
Fred Isaman 已提交
610 611
}

F
Fred Isaman 已提交
612
/* cinfo->lock held by caller */
613
int
F
Fred Isaman 已提交
614 615
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
F
Fred Isaman 已提交
616 617 618 619 620
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
621 622
		if (!nfs_lock_request(req))
			continue;
623
		kref_get(&req->wb_kref);
F
Fred Isaman 已提交
624
		if (cond_resched_lock(cinfo->lock))
625
			list_safe_reset_next(req, tmp, wb_list);
F
Fred Isaman 已提交
626
		nfs_request_remove_commit_list(req, cinfo);
627 628
		nfs_list_add_request(req, dst);
		ret++;
629
		if ((ret == max) && !cinfo->dreq)
630
			break;
F
Fred Isaman 已提交
631 632
	}
	return ret;
633 634
}

L
Linus Torvalds 已提交
635 636 637
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
F
Fred Isaman 已提交
638 639
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
L
Linus Torvalds 已提交
640 641 642 643
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
644
int
F
Fred Isaman 已提交
645 646
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
L
Linus Torvalds 已提交
647
{
F
Fred Isaman 已提交
648
	int ret = 0;
649

F
Fred Isaman 已提交
650 651
	spin_lock(cinfo->lock);
	if (cinfo->mds->ncommit > 0) {
652
		const int max = INT_MAX;
F
Fred Isaman 已提交
653

F
Fred Isaman 已提交
654 655 656
		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
F
Fred Isaman 已提交
657
	}
F
Fred Isaman 已提交
658
	spin_unlock(cinfo->lock);
659
	return ret;
L
Linus Torvalds 已提交
660
}
F
Fred Isaman 已提交
661

662
#else
663
unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
664 665 666 667
{
	return 0;
}

668 669
int nfs_scan_commit(struct inode *inode, struct list_head *dst,
		    struct nfs_commit_info *cinfo)
670 671 672
{
	return 0;
}
L
Linus Torvalds 已提交
673 674 675
#endif

/*
676 677
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
L
Linus Torvalds 已提交
678
 *
679 680
 * If the attempt fails, then the existing request is flushed out
 * to disk.
L
Linus Torvalds 已提交
681
 */
682 683 684 685
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
L
Linus Torvalds 已提交
686
{
687 688 689 690 691 692 693
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;
L
Linus Torvalds 已提交
694 695

	end = offset + bytes;
696
	spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
697 698

	for (;;) {
699
		req = nfs_page_find_request_locked(NFS_I(inode), page);
700 701 702 703 704 705 706 707 708 709
		if (req == NULL)
			goto out_unlock;

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
710
		if (offset > rqend
711 712 713
		    || end < req->wb_offset)
			goto out_flushme;

714
		if (nfs_lock_request(req))
L
Linus Torvalds 已提交
715 716
			break;

717
		/* The request is locked, so wait and then retry */
718
		spin_unlock(&inode->i_lock);
719 720 721 722 723
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
724 725 726 727 728 729 730 731 732
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
733 734 735 736
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	spin_unlock(&inode->i_lock);
737 738
	if (req)
		nfs_clear_request_commit(req);
739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
758
	struct inode *inode = page_file_mapping(page)->host;
759
	struct nfs_page	*req;
L
Linus Torvalds 已提交
760

761 762 763 764 765 766
	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, inode, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
F
Fred Isaman 已提交
767
	nfs_inode_add_request(inode, req);
768
out:
T
Trond Myklebust 已提交
769
	return req;
L
Linus Torvalds 已提交
770 771
}

772 773 774 775 776 777 778 779 780 781 782
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
783
	nfs_mark_request_dirty(req);
784
	nfs_unlock_and_release_request(req);
785 786 787
	return 0;
}

L
Linus Torvalds 已提交
788 789
int nfs_flush_incompatible(struct file *file, struct page *page)
{
790
	struct nfs_open_context *ctx = nfs_file_open_context(file);
791
	struct nfs_lock_context *l_ctx;
L
Linus Torvalds 已提交
792
	struct nfs_page	*req;
T
Trond Myklebust 已提交
793
	int do_flush, status;
L
Linus Torvalds 已提交
794 795 796 797 798 799 800 801
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
T
Trond Myklebust 已提交
802 803 804 805
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
806 807
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page || req->wb_context != ctx;
808
		if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
809 810 811
			do_flush |= l_ctx->lockowner.l_owner != current->files
				|| l_ctx->lockowner.l_pid != current->tgid;
		}
L
Linus Torvalds 已提交
812
		nfs_release_request(req);
T
Trond Myklebust 已提交
813 814
		if (!do_flush)
			return 0;
815
		status = nfs_wb_page(page_file_mapping(page)->host, page);
T
Trond Myklebust 已提交
816 817
	} while (status == 0);
	return status;
L
Linus Torvalds 已提交
818 819
}

820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846
/*
 * Avoid buffered writes when a open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs  NFS_FILE_SYNC writes if the key will expired within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
{
	return rpcauth_cred_key_to_expire(ctx->cred);
}

847 848 849 850 851
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
852
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
853
{
854 855
	struct nfs_inode *nfsi = NFS_I(inode);

856 857
	if (nfs_have_delegated_attributes(inode))
		goto out;
858 859
	if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
		return false;
860
	smp_rmb();
861
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
862 863 864
		return false;
out:
	return PageUptodate(page) != 0;
865 866
}

867 868 869 870 871
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
872 873
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
874 875 876 877 878
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	if (file->f_flags & O_DSYNC)
		return 0;
879 880
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
881 882
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
883
	if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
884
			inode->i_flock->fl_end == OFFSET_MAX &&
885
			inode->i_flock->fl_type != F_RDLCK))
886 887 888 889
		return 1;
	return 0;
}

L
Linus Torvalds 已提交
890 891 892 893 894 895 896 897 898
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
899
	struct nfs_open_context *ctx = nfs_file_open_context(file);
900
	struct inode	*inode = page_file_mapping(page)->host;
L
Linus Torvalds 已提交
901 902
	int		status = 0;

C
Chuck Lever 已提交
903 904
	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

905 906
	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));
L
Linus Torvalds 已提交
907

908
	if (nfs_can_extend_write(file, page, inode)) {
909
		count = max(count + offset, nfs_page_length(page));
L
Linus Torvalds 已提交
910 911 912
		offset = 0;
	}

913
	status = nfs_writepage_setup(ctx, page, offset, count);
914 915
	if (status < 0)
		nfs_set_pageerror(page);
916 917
	else
		__set_page_dirty_nobuffers(page);
L
Linus Torvalds 已提交
918

919
	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
L
Linus Torvalds 已提交
920 921 922 923
			status, (long long)i_size_read(inode));
	return status;
}

924
static int flush_task_priority(int how)
L
Linus Torvalds 已提交
925 926 927 928 929 930 931 932 933 934
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

935
int nfs_initiate_write(struct rpc_clnt *clnt,
936
		       struct nfs_pgio_data *data,
937
		       const struct rpc_call_ops *call_ops,
938
		       int how, int flags)
L
Linus Torvalds 已提交
939
{
940
	struct inode *inode = data->header->inode;
941
	int priority = flush_task_priority(how);
942
	struct rpc_task *task;
943 944 945
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
946
		.rpc_cred = data->header->cred,
947
	};
948
	struct rpc_task_setup task_setup_data = {
949
		.rpc_client = clnt,
950
		.task = &data->task,
951
		.rpc_message = &msg,
952 953
		.callback_ops = call_ops,
		.callback_data = data,
954
		.workqueue = nfsiod_workqueue,
955
		.flags = RPC_TASK_ASYNC | flags,
956
		.priority = priority,
957
	};
958
	int ret = 0;
L
Linus Torvalds 已提交
959

960 961 962 963
	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
964
		"(req %s/%llu, %u bytes @ offset %llu)\n",
965 966
		data->task.tk_pid,
		inode->i_sb->s_id,
967
		(unsigned long long)NFS_FILEID(inode),
968 969 970
		data->args.count,
		(unsigned long long)data->args.offset);

971 972 973
	nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
				 &task_setup_data.rpc_client, &msg, data);

974 975 976 977 978 979 980 981 982 983 984 985 986 987
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
988
EXPORT_SYMBOL_GPL(nfs_initiate_write);
989

990
static int nfs_do_write(struct nfs_pgio_data *data,
991 992 993
		const struct rpc_call_ops *call_ops,
		int how)
{
994
	struct inode *inode = data->header->inode;
995

996
	return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
L
Linus Torvalds 已提交
997 998
}

999 1000 1001 1002
static int nfs_do_multiple_writes(struct list_head *head,
		const struct rpc_call_ops *call_ops,
		int how)
{
1003
	struct nfs_pgio_data *data;
1004 1005 1006 1007 1008
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

1009
		data = list_first_entry(head, struct nfs_pgio_data, list);
1010 1011
		list_del_init(&data->list);
		
1012
		ret2 = nfs_do_write(data, call_ops, how);
1013 1014 1015 1016 1017 1018
		 if (ret == 0)
			 ret = ret2;
	}
	return ret;
}

F
Fred 已提交
1019 1020 1021 1022 1023 1024 1025
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
1026
	nfs_unlock_request(req);
1027
	nfs_end_page_writeback(req->wb_page);
1028
	nfs_release_request(req);
F
Fred 已提交
1029 1030
}

1031
static void nfs_async_write_error(struct list_head *head)
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
}

1042 1043 1044 1045 1046
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
};

1047
static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1048
{
1049
	struct nfs_rw_header *whdr;
1050
	struct nfs_pgio_header *hdr;
1051 1052
	int ret;

1053
	whdr = nfs_rw_header_alloc(desc->pg_rw_ops);
1054
	if (!whdr) {
1055
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1056 1057 1058
		return -ENOMEM;
	}
	hdr = &whdr->header;
1059
	nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
1060
	atomic_inc(&hdr->refcnt);
1061
	ret = nfs_generic_pgio(desc, hdr);
1062
	if (ret == 0)
1063 1064 1065 1066
		ret = nfs_do_multiple_writes(&hdr->rpc_list,
					     desc->pg_rpc_callops,
					     desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
1067
		hdr->completion_ops->completion(hdr);
1068
	return ret;
1069 1070 1071 1072 1073 1074 1075
}

static const struct nfs_pageio_ops nfs_pageio_write_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_writepages,
};

1076
void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1077
			       struct inode *inode, int ioflags, bool force_mds,
1078
			       const struct nfs_pgio_completion_ops *compl_ops)
L
Linus Torvalds 已提交
1079
{
1080 1081 1082 1083 1084 1085 1086
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pageio_write_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
1087 1088
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
1089
}
B
Bryan Schumaker 已提交
1090
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
L
Linus Torvalds 已提交
1091

1092 1093 1094 1095 1096
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_write_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
1097
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1098

L
Linus Torvalds 已提交
1099

1100 1101 1102 1103 1104 1105 1106
void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

1107
static void nfs_writeback_release_common(struct nfs_pgio_data *data)
1108
{
1109
	struct nfs_pgio_header *hdr = data->header;
1110
	int status = data->task.tk_status;
1111

1112 1113 1114 1115 1116
	if ((status >= 0) && nfs_write_need_commit(data)) {
		spin_lock(&hdr->lock);
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
			; /* Do nothing */
		else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
1117 1118
			memcpy(&hdr->verf, &data->verf, sizeof(hdr->verf));
		else if (memcmp(&hdr->verf, &data->verf, sizeof(hdr->verf)))
1119 1120
			set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
		spin_unlock(&hdr->lock);
L
Linus Torvalds 已提交
1121 1122 1123
	}
}

1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}
1148

L
Linus Torvalds 已提交
1149 1150 1151
/*
 * This function is called when the WRITE call is complete.
 */
1152 1153
static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data,
			      struct inode *inode)
L
Linus Torvalds 已提交
1154
{
1155
	int status;
L
Linus Torvalds 已提交
1156

1157 1158 1159 1160 1161 1162 1163
	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
1164
	status = NFS_PROTO(inode)->write_done(task, data);
1165
	if (status != 0)
1166 1167
		return status;
	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, data->res.count);
C
Chuck Lever 已提交
1168

B
Bryan Schumaker 已提交
1169
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
1170
	if (data->res.verf->committed < data->args.stable && task->tk_status >= 0) {
L
Linus Torvalds 已提交
1171 1172 1173 1174 1175 1176 1177 1178 1179 1180
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long    complain;

1181
		/* Note this will print the MDS for a DS write */
L
Linus Torvalds 已提交
1182
		if (time_before(complain, jiffies)) {
1183
			dprintk("NFS:       faulty NFS server %s:"
L
Linus Torvalds 已提交
1184
				" (committed = %d) != (stable = %d)\n",
1185
				NFS_SERVER(inode)->nfs_client->cl_hostname,
1186
				data->res.verf->committed, data->args.stable);
L
Linus Torvalds 已提交
1187 1188 1189 1190
			complain = jiffies + 300 * HZ;
		}
	}
#endif
1191 1192 1193 1194

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode))
		nfs_mark_for_revalidate(inode);
1195 1196 1197 1198 1199 1200 1201 1202 1203 1204
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task, struct nfs_pgio_data *data)
{
	struct nfs_pgio_args	*argp = &data->args;
	struct nfs_pgio_res	*resp = &data->res;
1205 1206

	if (resp->count < argp->count) {
L
Linus Torvalds 已提交
1207 1208
		static unsigned long    complain;

1209
		/* This a short write! */
1210
		nfs_inc_stats(data->header->inode, NFSIOS_SHORTWRITE);
C
Chuck Lever 已提交
1211

L
Linus Torvalds 已提交
1212
		/* Has the server at least made some progress? */
1213 1214 1215 1216 1217 1218
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
L
Linus Torvalds 已提交
1219
			}
1220 1221
			nfs_set_pgio_error(data->header, -EIO, argp->offset);
			task->tk_status = -EIO;
1222
			return;
L
Linus Torvalds 已提交
1223
		}
1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235
		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			data->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
L
Linus Torvalds 已提交
1236
		}
1237
		rpc_restart_call_prepare(task);
L
Linus Torvalds 已提交
1238 1239 1240 1241
	}
}


B
Bryan Schumaker 已提交
1242
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
1243 1244
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
1245 1246
	int ret;

1247 1248
	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
		return 1;
1249 1250 1251 1252 1253 1254 1255
	if (!may_wait)
		return 0;
	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
	return (ret < 0) ? ret : 1;
1256 1257
}

1258
static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1259 1260 1261 1262 1263 1264
{
	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}

1265
void nfs_commitdata_release(struct nfs_commit_data *data)
L
Linus Torvalds 已提交
1266
{
1267 1268
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
L
Linus Torvalds 已提交
1269
}
1270
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
L
Linus Torvalds 已提交
1271

1272
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1273
			const struct rpc_call_ops *call_ops,
1274
			int how, int flags)
L
Linus Torvalds 已提交
1275
{
1276
	struct rpc_task *task;
1277
	int priority = flush_task_priority(how);
1278 1279 1280
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
1281
		.rpc_cred = data->cred,
1282
	};
1283
	struct rpc_task_setup task_setup_data = {
1284
		.task = &data->task,
1285
		.rpc_client = clnt,
1286
		.rpc_message = &msg,
1287
		.callback_ops = call_ops,
1288
		.callback_data = data,
1289
		.workqueue = nfsiod_workqueue,
1290
		.flags = RPC_TASK_ASYNC | flags,
1291
		.priority = priority,
1292
	};
1293 1294 1295 1296 1297
	/* Set up the initial task struct.  */
	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

1298 1299 1300
	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);

1301 1302 1303 1304 1305 1306 1307 1308
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
1309
EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1310 1311 1312 1313

/*
 * Set up the argument/result storage required for the RPC call.
 */
1314
void nfs_init_commit(struct nfs_commit_data *data,
1315 1316 1317
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
1318 1319
{
	struct nfs_page *first = nfs_list_entry(head->next);
1320
	struct inode *inode = first->wb_context->dentry->d_inode;
L
Linus Torvalds 已提交
1321 1322 1323 1324 1325 1326 1327

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
1328
	data->cred	  = first->wb_context->cred;
1329
	data->lseg	  = lseg; /* reference transferred */
1330
	data->mds_ops     = &nfs_commit_ops;
1331
	data->completion_ops = cinfo->completion_ops;
F
Fred Isaman 已提交
1332
	data->dreq	  = cinfo->dreq;
L
Linus Torvalds 已提交
1333 1334

	data->args.fh     = NFS_FH(data->inode);
1335 1336 1337
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
1338
	data->context     = get_nfs_open_context(first->wb_context);
L
Linus Torvalds 已提交
1339 1340
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
1341
	nfs_fattr_init(&data->fattr);
L
Linus Torvalds 已提交
1342
}
1343
EXPORT_SYMBOL_GPL(nfs_init_commit);
L
Linus Torvalds 已提交
1344

1345
void nfs_retry_commit(struct list_head *page_list,
F
Fred Isaman 已提交
1346 1347
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo)
1348 1349 1350 1351 1352 1353
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
F
Fred Isaman 已提交
1354
		nfs_mark_request_commit(req, lseg, cinfo);
1355 1356
		if (!cinfo->dreq) {
			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1357
			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
1358 1359
				     BDI_RECLAIMABLE);
		}
1360
		nfs_unlock_and_release_request(req);
1361 1362
	}
}
1363
EXPORT_SYMBOL_GPL(nfs_retry_commit);
1364

L
Linus Torvalds 已提交
1365 1366 1367 1368
/*
 * Commit dirty pages
 */
static int
F
Fred Isaman 已提交
1369 1370
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
L
Linus Torvalds 已提交
1371
{
1372
	struct nfs_commit_data	*data;
L
Linus Torvalds 已提交
1373

1374
	data = nfs_commitdata_alloc();
L
Linus Torvalds 已提交
1375 1376 1377 1378 1379

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
1380 1381
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
1382 1383
	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
				   how, 0);
L
Linus Torvalds 已提交
1384
 out_bad:
F
Fred Isaman 已提交
1385
	nfs_retry_commit(head, NULL, cinfo);
1386
	cinfo->completion_ops->error_cleanup(NFS_I(inode));
L
Linus Torvalds 已提交
1387 1388 1389 1390 1391 1392
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
1393
static void nfs_commit_done(struct rpc_task *task, void *calldata)
L
Linus Torvalds 已提交
1394
{
1395
	struct nfs_commit_data	*data = calldata;
L
Linus Torvalds 已提交
1396

C
Chuck Lever 已提交
1397
        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
L
Linus Torvalds 已提交
1398 1399
                                task->tk_pid, task->tk_status);

1400
	/* Call the NFS version-specific code */
1401
	NFS_PROTO(data->inode)->commit_done(task, data);
1402 1403
}

1404
static void nfs_commit_release_pages(struct nfs_commit_data *data)
1405
{
1406
	struct nfs_page	*req;
1407
	int status = data->task.tk_status;
1408
	struct nfs_commit_info cinfo;
1409

L
Linus Torvalds 已提交
1410 1411 1412
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
F
Fred Isaman 已提交
1413
		nfs_clear_page_commit(req->wb_page);
L
Linus Torvalds 已提交
1414

1415
		dprintk("NFS:       commit (%s/%llu %d@%lld)",
1416
			req->wb_context->dentry->d_sb->s_id,
1417
			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
L
Linus Torvalds 已提交
1418 1419
			req->wb_bytes,
			(long long)req_offset(req));
1420 1421
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
L
Linus Torvalds 已提交
1422
			nfs_inode_remove_request(req);
1423
			dprintk(", error = %d\n", status);
L
Linus Torvalds 已提交
1424 1425 1426 1427 1428
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
1429
		if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
L
Linus Torvalds 已提交
1430 1431 1432 1433 1434 1435 1436
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
F
Fred 已提交
1437
		nfs_mark_request_dirty(req);
1438
		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
L
Linus Torvalds 已提交
1439
	next:
1440
		nfs_unlock_and_release_request(req);
L
Linus Torvalds 已提交
1441
	}
1442 1443 1444
	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_commit_clear_lock(NFS_I(data->inode));
1445 1446 1447 1448
}

static void nfs_commit_release(void *calldata)
{
1449
	struct nfs_commit_data *data = calldata;
1450

1451
	data->completion_ops->completion(data);
1452
	nfs_commitdata_release(calldata);
L
Linus Torvalds 已提交
1453
}
1454 1455

static const struct rpc_call_ops nfs_commit_ops = {
1456
	.rpc_call_prepare = nfs_commit_prepare,
1457 1458 1459
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
L
Linus Torvalds 已提交
1460

1461 1462 1463 1464 1465
static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.error_cleanup = nfs_commit_clear_lock,
};

1466 1467
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
1468 1469 1470
{
	int status;

F
Fred Isaman 已提交
1471
	status = pnfs_commit_list(inode, head, how, cinfo);
1472
	if (status == PNFS_NOT_ATTEMPTED)
F
Fred Isaman 已提交
1473
		status = nfs_commit_list(inode, head, how, cinfo);
1474 1475 1476
	return status;
}

1477
int nfs_commit_inode(struct inode *inode, int how)
L
Linus Torvalds 已提交
1478 1479
{
	LIST_HEAD(head);
F
Fred Isaman 已提交
1480
	struct nfs_commit_info cinfo;
1481
	int may_wait = how & FLUSH_SYNC;
1482
	int res;
L
Linus Torvalds 已提交
1483

1484 1485
	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
	if (res <= 0)
1486
		goto out_mark_dirty;
F
Fred Isaman 已提交
1487 1488
	nfs_init_cinfo_from_inode(&cinfo, inode);
	res = nfs_scan_commit(inode, &head, &cinfo);
L
Linus Torvalds 已提交
1489
	if (res) {
1490 1491
		int error;

F
Fred Isaman 已提交
1492
		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1493 1494
		if (error < 0)
			return error;
1495
		if (!may_wait)
1496
			goto out_mark_dirty;
1497 1498 1499 1500 1501 1502
		error = wait_on_bit(&NFS_I(inode)->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (error < 0)
			return error;
1503 1504
	} else
		nfs_commit_clear_lock(NFS_I(inode));
1505 1506 1507 1508 1509 1510 1511 1512
	return res;
	/* Note: If we exit without ensuring that the commit is complete,
	 * we must mark the inode as dirty. Otherwise, future calls to
	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 * that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
L
Linus Torvalds 已提交
1513 1514
	return res;
}
1515 1516 1517

static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
1518 1519 1520
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;
1521

1522
	/* no commits means nothing needs to be done */
F
Fred Isaman 已提交
1523
	if (!nfsi->commit_info.ncommit)
1524 1525
		return ret;

1526 1527 1528 1529
	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
F
Fred Isaman 已提交
1530
		if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
1531
			goto out_mark_dirty;
1532

1533
		/* don't wait for the COMMIT response */
1534
		flags = 0;
1535 1536
	}

1537 1538 1539 1540 1541 1542 1543 1544
	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
1545
		return 0;
1546 1547
	}
out_mark_dirty:
1548 1549 1550
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
1551
#else
1552 1553 1554 1555
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	return 0;
}
L
Linus Torvalds 已提交
1556 1557
#endif

1558 1559
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
1560 1561
	return nfs_commit_unstable_pages(inode, wbc);
}
B
Bryan Schumaker 已提交
1562
EXPORT_SYMBOL_GPL(nfs_write_inode);
1563

1564 1565 1566 1567
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
T
Trond Myklebust 已提交
1568 1569
{
	struct writeback_control wbc = {
1570
		.sync_mode = WB_SYNC_ALL,
T
Trond Myklebust 已提交
1571
		.nr_to_write = LONG_MAX,
1572 1573
		.range_start = 0,
		.range_end = LLONG_MAX,
T
Trond Myklebust 已提交
1574
	};
1575 1576 1577 1578 1579
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = sync_inode(inode, &wbc);
T
Trond Myklebust 已提交
1580

1581 1582
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
1583
}
B
Bryan Schumaker 已提交
1584
EXPORT_SYMBOL_GPL(nfs_wb_all);
1585

1586 1587 1588 1589 1590 1591
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	for (;;) {
1592
		wait_on_page_writeback(page);
1593 1594 1595
		req = nfs_page_find_request(page);
		if (req == NULL)
			break;
1596
		if (nfs_lock_request(req)) {
1597
			nfs_clear_request_commit(req);
1598 1599 1600 1601 1602 1603
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
1604
			nfs_unlock_and_release_request(req);
1605 1606 1607
			break;
		}
		ret = nfs_wait_on_request(req);
1608
		nfs_release_request(req);
1609
		if (ret < 0)
1610
			break;
1611 1612 1613 1614
	}
	return ret;
}

T
Trond Myklebust 已提交
1615 1616 1617 1618
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
1619
{
1620
	loff_t range_start = page_file_offset(page);
1621
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1622 1623
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
T
Trond Myklebust 已提交
1624
		.nr_to_write = 0,
1625 1626 1627 1628
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;
1629

1630 1631
	trace_nfs_writeback_page_enter(inode);

1632
	for (;;) {
1633
		wait_on_page_writeback(page);
1634 1635 1636 1637
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
1638
			continue;
T
Trond Myklebust 已提交
1639
		}
1640
		ret = 0;
1641 1642 1643
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
1644
		if (ret < 0)
1645
			goto out_error;
T
Trond Myklebust 已提交
1646
	}
1647
out_error:
1648
	trace_nfs_writeback_page_exit(inode, ret);
1649
	return ret;
1650 1651
}

1652 1653
#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1654
		struct page *page, enum migrate_mode mode)
1655
{
1656 1657 1658 1659 1660 1661 1662 1663 1664 1665
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;
1666

1667 1668
	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;
1669

1670
	return migrate_page(mapping, newpage, page, mode);
1671 1672 1673
}
#endif

D
David Howells 已提交
1674
int __init nfs_init_writepagecache(void)
L
Linus Torvalds 已提交
1675 1676
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1677
					     sizeof(struct nfs_rw_header),
L
Linus Torvalds 已提交
1678
					     0, SLAB_HWCACHE_ALIGN,
1679
					     NULL);
L
Linus Torvalds 已提交
1680 1681 1682
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

1683 1684
	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
L
Linus Torvalds 已提交
1685
	if (nfs_wdata_mempool == NULL)
1686
		goto out_destroy_write_cache;
L
Linus Torvalds 已提交
1687

1688 1689 1690 1691 1692
	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
1693
		goto out_destroy_write_mempool;
1694

1695
	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1696
						      nfs_cdata_cachep);
L
Linus Torvalds 已提交
1697
	if (nfs_commit_mempool == NULL)
1698
		goto out_destroy_commit_cache;
L
Linus Torvalds 已提交
1699

1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719
	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

L
Linus Torvalds 已提交
1720
	return 0;
1721 1722 1723 1724 1725 1726 1727 1728

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
L
Linus Torvalds 已提交
1729 1730
}

1731
void nfs_destroy_writepagecache(void)
L
Linus Torvalds 已提交
1732 1733
{
	mempool_destroy(nfs_commit_mempool);
1734
	kmem_cache_destroy(nfs_cdata_cachep);
L
Linus Torvalds 已提交
1735
	mempool_destroy(nfs_wdata_mempool);
1736
	kmem_cache_destroy(nfs_wdata_cachep);
L
Linus Torvalds 已提交
1737 1738
}

1739
static const struct nfs_rw_ops nfs_rw_write_ops = {
1740
	.rw_mode		= FMODE_WRITE,
1741 1742
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
1743
	.rw_release		= nfs_writeback_release_common,
1744 1745
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
1746
};