/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

/*
 * Convenient macros for min/max read-ahead pages.
 * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
 * The latter is necessary for systems with large page sizes (i.e. 64k).
 */
#define MAX_RA_PAGES	(VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
#define MIN_RA_PAGES	DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
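
/*
 * Example, assuming the usual defaults of VM_MAX_READAHEAD = 128 (kbytes),
 * VM_MIN_READAHEAD = 16 (kbytes) and a 4k PAGE_CACHE_SIZE:
 *
 *	MAX_RA_PAGES = 128*1024 / 4096      = 32 pages
 *	MIN_RA_PAGES = ceil(16*1024 / 4096) =  4 pages
 *
 * On a 64k page system MIN_RA_PAGES rounds up to 1 page instead of
 * truncating to 0, which is why the rounding direction matters.
 */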

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= MAX_RA_PAGES,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_index = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return MIN_RA_PAGES;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
	return;
}

/*
 * Set the initial window size: round the request size up to the next power
 * of 2, then quadruple it for small sizes, double it for medium ones, and
 * clamp large ones to the max (see the worked example below).
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}
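
/*
 * Worked example for get_init_ra_size() with max = 32 pages (128k):
 *
 *	size  1 -> rounds to  1,  1 <= 32/32 -> initial window  4 pages
 *	size  3 -> rounds to  4,  4 <= 32/4  -> initial window  8 pages
 *	size  8 -> rounds to  8,  8 <= 32/4  -> initial window 16 pages
 *	size 16 -> rounds to 16, 16 >  32/4  -> initial window 32 pages (max)
 */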

/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
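
/*
 * Worked example for get_next_ra_size() with max = 32 and min = 4:
 *
 *	sequential growth doubles the window: 4 -> 8 -> 16 -> 32 (capped);
 *	only a window below max/16 (here: a single page) is quadrupled;
 *	after a cache miss the next window shrinks to max(cur - 2, min).
 */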

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
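
/*
 * Hypothetical usage sketch from a filesystem's ->readpages() (the names
 * my_fs_filler/my_fs_read_one are illustrative, not real kernel symbols):
 *
 *	static int my_fs_filler(void *data, struct page *page)
 *	{
 *		return my_fs_read_one((struct file *)data, page);
 *	}
 *
 *	err = read_cache_pages(mapping, pages, my_fs_filler, filp);
 *
 * On a non-zero filler return, the remaining pages are released and the
 * error is propagated to the caller.
 */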

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *              Together, these form the "current window".
 * prev_index:  The page which the readahead algorithm most-recently inspected.
 *              It is mainly used to detect sequential file reading.
 *              If page_cache_readahead sees that it is again being called for
 *              a page which it just looked at, it can return immediately without
 *              making any state changes.
 * prev_offset:  Offset in prev_index at which the last read ended - used for
 *              detection of sequential file reading.
 * ahead_start,
 * ahead_size:  Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_index is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So
 * we submit a new batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page. Ahead window calculations are done only when it
 * is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will resume
 * at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to satisfy
 * the request (I/O request > max_readahead).
 */
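
/*
 * Illustrative walk-through (assuming 4k pages, max_readahead = 32 pages,
 * and an application reading one page at a time from page 0):
 *
 *	read page 0:  first sequential access; current window becomes
 *	              pages 0-3 (get_init_ra_size(1, 32) == 4) and that
 *	              I/O is submitted.
 *	read page 1:  no ahead window exists yet, so make_ahead_window()
 *	              submits pages 4-11 (get_next_ra_size() doubles 4 to 8).
 *	read page 4:  prev_index reaches ahead_start; the ahead window
 *	              becomes the current window and a new 16-page ahead
 *	              window (pages 12-27) is submitted.
 */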

/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		
		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
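		/*
		 * Tag the page sitting lookahead_size pages before the end
		 * of this batch: a later read hitting it can kick off the
		 * next readahead before the window runs dry.  (With
		 * lookahead_size == 0 no page is tagged.)
		 */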
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
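
/*
 * Example: with 4k pages each chunk is 512 pages, so a 10MB (2560 page)
 * request goes out as five 512-page calls to __do_page_cache_readahead().
 */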

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}
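
/*
 * e.g. once VM_MAX_CACHE_HIT pages worth of readahead (256 with the usual
 * defaults) have been found already cached, RA_FLAG_INCACHE switches
 * readahead off; it stays off until handle_ra_miss() clears the flag.
 */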

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait till the read completes; otherwise attempt to read without
 * blocking.
 * Returns 1 ('success') if the read completed without switching off
 * readahead mode; otherwise returns 0 ('failure').
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);

	return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_index >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies the pages are
		 * all cached, so we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call, so
		 * we can safely assume we have taken care of all the
		 * pages requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will be closed anyway
		 * if we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 *
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_index && --req_size)
		++offset;

	/* Note that prev_index == -1 if it is a first read */
	sequential = (offset == ra->prev_index + 1);
	ra->prev_index = offset;
	ra->prev_offset = 0;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_index += newsize - 1;

	/*
	 * Special case - first read at start of file. We'll assume it's
	 * a whole-file read and grow the window fast.  Or detect first
	 * sequential access.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so this must be the next page; otherwise it is random.
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
				 newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (i.e. we have an existing window)
	 */
	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_index >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_index shouldn't overrun the ahead window */
		ra->prev_index = min(ra->prev_index,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_index + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	unsigned long ra_size;
	unsigned long la_size;
	int actual;

	ra_size = ra_readahead_size(ra);
	la_size = ra_lookahead_size(ra);
	actual = __do_page_cache_readahead(mapping, filp,
					ra->ra_index, ra_size, la_size);

	return actual;
}
EXPORT_SYMBOL_GPL(ra_submit);