/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		if (fatal_signal_pending(current))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
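
/*
 * Illustrative caller sketch (hypothetical; not a function in this
 * file): create a request for a locked page and queue it on a local
 * list for later coalescing. Errors follow the ERR_PTR convention
 * used by nfs_create_request() above.
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	nfs_list_add_request(req, &head);
 */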

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to tag as locked
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);

	if (!nfs_lock_request_dontget(req))
		return 0;
	if (req->wb_page != NULL)
		radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (req->wb_page != NULL) {
		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
	} else
		nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}
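
/*
 * Worked example (illustrative values, assuming PAGE_CACHE_SIZE is
 * 4096): prev = { wb_index = 5, wb_pgbase = 0, wb_bytes = 4096 } and
 * req = { wb_index = 6, wb_pgbase = 0 } with matching credential, open
 * state and lockowner coalesce into one run. A hole between the
 * indices, a non-zero req->wb_pgbase, or a prev that stops short of
 * the page boundary makes this function return 0.
 */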

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}
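
/*
 * Illustrative lifecycle sketch (hypothetical caller; 'inode', 'head'
 * and the choice of nfs_pagein_one() as pg_doio are assumptions
 * modelled on the read path, not code in this file):
 *
 *	struct nfs_pageio_descriptor desc;
 *
 *	nfs_pageio_init(&desc, inode, nfs_pagein_one,
 *			NFS_SERVER(inode)->rsize, 0);
 *	while (!list_empty(head)) {
 *		struct nfs_page *req = nfs_list_entry(head->next);
 *		if (!nfs_pageio_add_request(&desc, req))
 *			break;
 *	}
 *	nfs_pageio_complete(&desc);
 *
 * On failure nfs_pageio_add_request() returns 0 and leaves the error
 * in desc.pg_error.
 */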

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}
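
/*
 * Illustrative sketch (an assumption modelled on the writeback path):
 * flush any batch that is not contiguous with 'page' before sleeping
 * on its locked request, so the caller never waits while holding a
 * non-contiguous range.
 *
 *	nfs_pageio_cond_complete(pgio, page->index);
 *	ret = nfs_wait_on_request(req);
 */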

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space,
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				nfs_list_add_request(req, dst);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}
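
/*
 * Illustrative caller sketch (assumed context; NFS_PAGE_TAG_COMMIT is
 * the tag the write path uses for requests awaiting a COMMIT): gather
 * every tagged request on the inode while holding i_lock, as required
 * above.
 *
 *	spin_lock(&inode->i_lock);
 *	res = nfs_scan_list(NFS_I(inode), &head, 0, 0, NFS_PAGE_TAG_COMMIT);
 *	spin_unlock(&inode->i_lock);
 */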

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}