/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

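/*
 * nfs_pgarray_set - set up the page vector of an nfs_page_array
 * @p: page array to initialise
 * @pagecount: number of pages the array must be able to hold
 *
 * Uses the embedded page_array when @pagecount fits, otherwise falls back
 * to a kcalloc()'d vector.  Returns true on success; on allocation failure
 * npages is reset to 0 and false is returned.
 */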
bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

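/* Allocate a zeroed nfs_page from the slab cache and initialise its list head. */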
static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to the request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

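/*
 * Bit-wait callback used by nfs_wait_on_request(): simply let the caller
 * sleep in io_schedule() until the waited-on bit is cleared.
 */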
static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

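/*
 * nfs_generic_pg_test - default check for whether a request may be coalesced
 * @desc: pageio descriptor holding the requests coalesced so far
 * @prev: previous request on the descriptor (unused by the generic test)
 * @req: request being considered for coalescing
 *
 * Returns true if adding @req keeps the total I/O within desc->pg_bsize.
 */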
bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return 0;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
}
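
/*
 * A minimal usage sketch of the descriptor API (illustrative only -- the
 * real callers live in the read and write paths, and pg_ops/bsize/io_flags
 * below are placeholders):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, bsize, io_flags);
 *	if (nfs_pageio_add_request(&pgio, req))
 *		nfs_pageio_complete(&pgio);
 */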

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the pageio descriptor (used for the pg_test callback)
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	if (req->wb_index != (prev->wb_index + 1))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

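/*
 * nfs_do_recoalesce - unwind the descriptor and re-add its requests
 * @desc: pageio descriptor to re-coalesce
 *
 * Called when pg_recoalesce has been set: the requests queued so far are
 * spliced onto a private list and fed through __nfs_pageio_add_request()
 * again.  Returns 0 if an I/O error was recorded, 1 otherwise.
 */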
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

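/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Adds the request, re-coalescing the descriptor if needed.  Returns true
 * if the request was consumed, false if an error occurred (recorded in
 * desc->pg_error).
 */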
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

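/* Create the slab cache used for struct nfs_page allocations. */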
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

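/* Tear down the nfs_page slab cache; counterpart to nfs_init_nfspagecache(). */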
void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}