/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
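
/* Illustrative sketch (not part of this file): a netfs that must not let a
 * page be reclaimed or invalidated while the cache is still writing it out
 * would pair the two helpers above, e.g. in its ->launder_page() path:
 *
 *	if (fscache_check_page_write(cookie, page))
 *		fscache_wait_on_page_write(cookie, page);
 *
 * fscache_check_page_write() and fscache_wait_on_page_write() are the
 * cookie-validating wrappers in linux/fscache.h that call down to the
 * __-prefixed functions here.
 */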

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	__fscache_wait_on_page_write(cookie, page);
	gfp &= ~__GFP_WAIT;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
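
/* Illustrative sketch (hypothetical caller, not defined here): a netfs
 * typically reaches __fscache_maybe_release_page() from its ->releasepage()
 * via the fscache_maybe_release_page() wrapper, letting vmscan cancel or
 * wait out a pending store:
 *
 *	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		if (PageFsCache(page))
 *			return fscache_maybe_release_page(cookie, page, gfp);
 *		return 1;
 *	}
 *
 * "mynetfs" and the cookie lookup are placeholders.
 */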

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	wake_cookie = __fscache_unuse_cookie(cookie);
	spin_unlock(&cookie->lock);
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
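
/* Illustrative (assumed caller, not defined here): a netfs calls the
 * fscache_attr_changed() wrapper from its setattr/truncate path once the
 * inode's attributes have changed on the server, so the backing cache
 * object can be resized to match, e.g.:
 *
 *	if (new_size != old_size)
 *		fscache_attr_changed(cookie);
 */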

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTCMP(atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead,
					  void (*do_cancel)(struct fscache_operation *))
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, do_cancel);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dead(object))) {
		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
		fscache_cancel_op(op, do_cancel);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead),
		fscache_do_cancel_retrieval);
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
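
/* Illustrative sketch of the expected caller (not part of this file): a
 * netfs ->readpage() tries the cache first via the fscache_read_or_alloc_page()
 * wrapper and falls back to a server read on -ENODATA or -ENOBUFS; the
 * mynetfs_* names are hypothetical:
 *
 *	ret = fscache_read_or_alloc_page(cookie, page,
 *					 mynetfs_readpage_from_fscache_complete,
 *					 NULL, GFP_KERNEL);
 *	switch (ret) {
 *	case 0:
 *		return 0;	read dispatched; end_io_func will complete it
 *	case -ENOBUFS:		inactive cache - fall through
 *	case -ENODATA:		no data in cache - fall through
 *	default:
 *		return mynetfs_readpage_from_server(page);
 *	}
 */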

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *                the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead),
		fscache_do_cancel_retrieval);
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
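
/* Illustrative (hypothetical caller): ->readpages() passes the whole list
 * in via the fscache_read_or_alloc_pages() wrapper; pages the cache takes
 * are removed from *pages and *nr_pages, and the netfs then reads only the
 * remainder from the server:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  mynetfs_readpage_from_fscache_complete,
 *					  NULL, mapping_gfp_mask(mapping));
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;	the cache took everything
 *	return mynetfs_readpages_from_server(mapping, pages, nr_pages);
 */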

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead),
		fscache_do_cancel_retrieval);
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
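
/* Note (descriptive, not from the original source): unlike the
 * read_or_alloc functions above, __fscache_alloc_page() only reserves a
 * block - no data is read and no end_io_func is involved.  A netfs might
 * use the fscache_alloc_page() wrapper to set up space for freshly created
 * data before storing it with fscache_write_page().
 */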

/*
 * Unmark pages allocated in the readahead code path (via
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);
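
/* Illustrative (hypothetical caller): if pages were marked PG_fscache by a
 * readahead read-or-alloc call but the netfs then abandons them (say the
 * server read failed), it must unmark them before they are freed:
 *
 *	fscache_readpages_cancel(cookie, pages);
 */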

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the netfs pages from
		 * which we might write to the cache no longer exist -
		 * therefore, we can just cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
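
/* Illustrative (hypothetical caller): a netfs typically stores a page it
 * has just read from the server via the fscache_write_page() wrapper,
 * uncaching on failure so the page isn't left marked:
 *
 *	ret = fscache_write_page(cookie, page, GFP_KERNEL);
 *	if (ret != 0)
 *		fscache_uncache_page(cookie, page);
 */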

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
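
/* Note (descriptive, not from the original source): __fscache_uncache_page()
 * only clears PG_fscache and tells the cache backend to forget the page; it
 * does not wait for a store in progress.  Callers needing that guarantee
 * should call __fscache_wait_on_page_write() first, as
 * __fscache_uncache_all_inode_pages() below does.
 */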

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);