/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
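/*
 * Example (illustrative sketch, not part of the original file): a filesystem
 * ->readpages() implementation can hand its page list to read_cache_pages()
 * together with a per-page filler.  The names my_fill_page() and
 * my_readpages() below are hypothetical:
 *
 *	static int my_fill_page(void *data, struct page *page)
 *	{
 *		// read the backing blocks for page->index, then:
 *		SetPageUptodate(page);
 *		unlock_page(page);
 *		return 0;	// non-zero aborts and releases remaining pages
 *	}
 *
 *	static int my_readpages(struct file *filp, struct address_space *mapping,
 *				struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, my_fill_page, NULL);
 *	}
 */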

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O. This avoids the very bad
 * behaviour which would occur if page allocations were causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
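/*
 * Worked example (illustrative): a call with nr_to_read = 16 and
 * lookahead_size = 8 allocates up to 16 pages starting at @offset and sets
 * PG_readahead on the page at offset + 8 (the iteration where
 * page_idx == nr_to_read - lookahead_size), so a reader crossing that page
 * can kick off the next asynchronous batch while 8 pages are still unread.
 */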

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
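/*
 * Worked example (illustrative, assuming 4096-byte pages so that
 * PAGE_CACHE_SIZE == 4096): a request for 1300 pages is submitted as two
 * 512-page (2MB) chunks followed by a 276-page chunk, each issued through
 * __do_page_cache_readahead() with a lookahead_size of 0.
 */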

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial readahead window size: round the request up to the next
 * power of 2, then scale it up (x4 for small requests, i.e. <= max/32,
 * x2 for medium ones, i.e. <= max/4) and clamp the result to max.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
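/*
 * Worked example (illustrative, assuming max = 32 pages, i.e. a 128k window
 * of 4k pages): a 1-page request gets a 4-page initial window, a 5-page
 * request rounds up to 8 and doubles to 16 pages, and a 20-page request
 * rounds up to 32 and is clamped to max.
 */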

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
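/*
 * Worked example (illustrative, assuming max = 128 pages): successive
 * sequential hits grow a 4-page window as 4 -> 16 -> 32 -> 64 -> 128, after
 * which it stays pinned at max; only windows smaller than max/16 quadruple,
 * the rest merely double.
 */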

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
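/*
 * Worked example (illustrative, assuming ra_pages/max = 32): a sequential
 * reader that misses at offset 0 with a 4-page request gets an initial window
 * of {start = 0, size = 8, async_size = 4}, and page 4 is marked with
 * PG_readahead.  When the reader later touches page 4, the readahead window
 * is pushed forward to {start = 8, size = 16, async_size = 16}, so I/O for
 * pages 8-23 is already in flight before the reader blocks on them.
 */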

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max; this
 * count is a conservative estimate of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
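/*
 * Example (illustrative): if a 2-page read arrives at offset 100 while pages
 * 80-99 are cached but page 79 is not, count_history_pages() reports 20
 * history pages; since 20 > 2, a context readahead window of
 * min(20 + 2, max) pages is started at offset 100 instead of treating the
 * miss as a random read.
 */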

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset + 1, max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 */
	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
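/*
 * Usage sketch (illustrative): a buffered read path typically calls this on
 * a page cache miss, roughly as the generic file read code does:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */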

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
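/*
 * Usage sketch (illustrative): the same read path checks the marker on pages
 * it does find in the cache:
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, &filp->f_ra, filp,
 *					   page, index, last_index - index);
 */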

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index, nr);
	return 0;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}
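/*
 * Userspace example (illustrative): the syscall above backs readahead(2),
 * which prefetches a file region before it is actually read, e.g.:
 *
 *	int fd = open("data.bin", O_RDONLY);	// "data.bin" is hypothetical
 *	if (fd >= 0)
 *		readahead(fd, 0, 2 * 1024 * 1024);	// prefetch the first 2MB
 */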