/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;
out:
	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_cold(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
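
/*
 * As a rough illustration, assuming 4k pages: the 2MB chunk size above
 * limits each __do_page_cache_readahead() call to at most 512 pages.
 */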

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x 4 for small sizes, x 2 for medium ones - and cap it
 * at max for large requests.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
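
/*
 * For example, assuming the common 128k (32 page) default maximum:
 * a 1-2 page request gets a 4 page initial window, 3-4 pages get 8,
 * 5-8 pages get 16, and anything larger jumps straight to the 32 page
 * maximum.
 */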

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
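
/*
 * For instance, with a 32 page maximum a window that starts out at 4 pages
 * ramps up as 4 -> 8 -> 16 -> 32 over successive sequential hits; only
 * windows smaller than max/16 get the more aggressive x 4 step.
 */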

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
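
/*
 * A small worked example (sizes in pages, assuming max is 32): after a
 * readahead with start=100, size=32, async_size=32, pages 100-131 are in
 * flight and page 100 carries PG_readahead.  When the application later
 * touches page 100, ondemand_readahead() sees the expected offset, advances
 * the window to start=132, size=32, and submits it asynchronously, keeping
 * the I/O ahead of the reader.
 */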

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max;
 * this count is a conservative estimate of
 * 	- the length of the sequential read sequence, or
 * 	- the thrashing threshold in memory-tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * no history pages:
	 * it could be a random read
	 */
	if (!size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = get_init_ra_size(size + req_size, max);
	ra->async_size = ra->size;

	return 1;
}
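
/*
 * A hypothetical example: a read at offset 100 that finds pages 90-99
 * cached but page 89 missing yields a history count of 10 pages, so the
 * next window starts at offset 100 with a size derived from 10 + req_size;
 * a history reaching all the way back to page 0 is doubled first, as a
 * hint of a whole-file read.
 */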

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for the async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 */
	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
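
/*
 * Typical usage (a simplified sketch, modelled on a generic read path such
 * as do_generic_file_read(), not code from this file):
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	} else if (PageReadahead(page)) {
 *		page_cache_async_readahead(mapping, ra, filp,
 *					   page, index, last_index - index);
 *	}
 *
 * i.e. a cache miss triggers synchronous readahead, while hitting a page
 * marked with PG_readahead kicks off the next window asynchronously.
 */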