/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;

static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}
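
/*
 * Clarifying note (not in the original source): for small requests the
 * embedded page_array is used directly and no extra allocation happens;
 * only when pagecount exceeds ARRAY_SIZE(p->page_array) does the helper
 * fall back to kcalloc(), and a failed allocation leaves npages at 0 so
 * the function returns false.
 */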

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}
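
/*
 * Worked example (illustrative, not part of the original source): with
 * io_start = 0 and good_bytes = 16384, an error reported at pos = 8192
 * passes the "pos < io_start + good_bytes" test, so good_bytes is cut to
 * 8192 and NFS_IOHDR_ERROR is set; a later error at pos = 12288 is then
 * ignored because it falls beyond the already-shortened good range.
 */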

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to be read or written
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	req->wb_index	= page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
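
/*
 * Usage sketch (illustrative only; assumes the caller already holds the
 * page lock as required above, and error handling is abbreviated):
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... hand the request to a pageio descriptor for I/O ...
 *	nfs_unlock_and_release_request(req);
 */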

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return 0;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
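
/*
 * Worked example (illustrative): with pg_bsize = 65536 (a common
 * rsize/wsize), pg_count = 61440 already coalesced and an incoming
 * request of wb_bytes = 4096, the test 61440 + 4096 <= 65536 holds and
 * the request may be coalesced; one more 4096-byte request would exceed
 * pg_bsize and force the descriptor to be flushed first.
 */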

static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr)
{
	return container_of(hdr, struct nfs_rw_header, header);
}

/**
 * nfs_rw_header_alloc - Allocate a header for a read or write
 * @ops: Read or write function vector
 */
struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_rw_header *header = ops->rw_alloc_header();

	if (header) {
		struct nfs_pgio_header *hdr = &header->header;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
		hdr->rw_ops = ops;
	}
	return header;
}
EXPORT_SYMBOL_GPL(nfs_rw_header_alloc);

/*
 * nfs_rw_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_rw_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr));
}
EXPORT_SYMBOL_GPL(nfs_rw_header_free);

/**
 * nfs_pgio_data_alloc - Allocate pageio data
 * @hdr: The header making a request
 * @pagecount: Number of pages to create
 */
struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
					  unsigned int pagecount)
{
	struct nfs_pgio_data *data, *prealloc;

	prealloc = &NFS_RW_HEADER(hdr)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}

/**
 * nfs_pgio_data_release - Properly free pageio data
 * @data: The data to release
 */
void nfs_pgio_data_release(struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;
	struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr);

	put_nfs_open_context(data->args.context);
	if (data->pages.pagevec != data->pages.page_array)
		kfree(data->pages.pagevec);
	if (data == &pageio_header->rpc_data) {
		data->header = NULL;
		data = NULL;
	}
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
	kfree(data);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_release);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @data: The pageio data
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = data->header->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->args.fh     = NFS_FH(data->header->inode);
	data->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	data->mds_offset = data->args.offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pages.pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;
	data->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
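
/*
 * Note on the switch above (clarification, not original commentary): a
 * FLUSH_COND_STABLE write stays NFS_UNSTABLE only while requests are
 * queued for commit (nfs_reqs_to_commit(cinfo) != 0); otherwise the case
 * falls through to the default and the write is upgraded to
 * NFS_FILE_SYNC, since no separate COMMIT will follow.
 */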

/**
 * nfs_pgio_prepare - Prepare pageio data to go over the wire
 * @task: The current task
 * @calldata: pageio data to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	int err;
	err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data);
	if (err)
		rpc_exit(task, err);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio data to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	if (data->header->rw_ops->rw_release)
		data->header->rw_ops->rw_release(data);
	nfs_pgio_data_release(data);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);
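
/*
 * Usage sketch (illustrative only): the ops structures are normally
 * supplied by the read or write side, e.g. through their own init
 * wrappers, and next_request() below is a hypothetical iterator:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops, bsize, 0);
 *	while ((req = next_request()) != NULL) {
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;
 *	}
 *	nfs_pageio_complete(&pgio);
 */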

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio data to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	struct inode *inode = data->header->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (data->header->rw_ops->rw_done(task, data, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(data->header, task->tk_status, data->args.offset);
	else
		data->header->rw_ops->rw_result(task, data);
}

static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
		const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (!nfs_match_open_context(req->wb_context, prev->wb_context))
		return false;
	if (req->wb_context->dentry->d_inode->i_flock != NULL &&
	    !nfs_match_lock_context(req->wb_lock_context, prev->wb_lock_context))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}
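
/*
 * Illustrative example: if the descriptor currently ends with a request
 * for page index 11 and the caller is about to wait on a locked request
 * at index 13, the indices are not contiguous (13 != 11 + 1), so the
 * pending I/O is completed here before the caller sleeps, avoiding the
 * deadlock described above.
 */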

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};