Commit f4e6b498, authored by Fengguang Wu, committed by Linus Torvalds

readahead: combine file_ra_state.prev_index/prev_offset into prev_pos

Combine the file_ra_state members
				unsigned long prev_index
				unsigned int prev_offset
into
				loff_t prev_pos

It is more consistent and better supports huge files.

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 0bb7ba6b
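The "[akpm@linux-foundation.org: fix shift overflow]" note above presumably refers to the (loff_t) casts visible throughout the diff below: pgoff_t is a 32-bit unsigned long on 32-bit architectures, so shifting a page index left by PAGE_CACHE_SHIFT without widening it first would truncate any position at or beyond 4 GiB. A minimal userspace sketch of the pitfall (not from the commit; PAGE_CACHE_SHIFT assumed to be 12, i.e. 4 KiB pages, and a 32-bit index type used so the truncation shows on any host):

	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assumed: 4 KiB pages */

	int main(void)
	{
		/* A page index at the 4 GiB boundary; on 32-bit kernels
		 * pgoff_t is 32 bits wide, modeled here as unsigned int. */
		unsigned int index = 1u << 20;

		long long wrong = index << PAGE_CACHE_SHIFT;		 /* shifted in 32 bits: wraps to 0 */
		long long right = (long long)index << PAGE_CACHE_SHIFT; /* widened first: correct */

		printf("without cast: %lld\n", wrong);	/* 0 */
		printf("with cast:    %lld\n", right);	/* 4294967296 */
		return 0;
	}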
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -143,7 +143,7 @@ static int ext3_readdir(struct file * filp,
 				sb->s_bdev->bd_inode->i_mapping,
 				&filp->f_ra, filp,
 				index, 1);
-			filp->f_ra.prev_index = index;
+			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext3_bread(NULL, inode, blk, 0, &err);
 		}
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -142,7 +142,7 @@ static int ext4_readdir(struct file * filp,
 				sb->s_bdev->bd_inode->i_mapping,
 				&filp->f_ra, filp,
 				index, 1);
-			filp->f_ra.prev_index = index;
+			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext4_bread(NULL, inode, blk, 0, &err);
 		}
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -447,7 +447,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	 */
 	while (page_nr < nr_pages)
 		page_cache_release(pages[page_nr++]);
-	in->f_ra.prev_index = index;
+	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -704,8 +704,7 @@ struct file_ra_state {
 	unsigned int ra_pages;		/* Maximum readahead window */
 	int mmap_miss;			/* Cache miss stat for mmap accesses */
-	unsigned long prev_index;	/* Cache last read() position */
-	unsigned int prev_offset;	/* Offset where last read() ended in a page */
+	loff_t prev_pos;		/* Cache last read() position */
 };

 /*
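The new field carries the same information as the two it replaces: the upper bits of prev_pos hold the old prev_index, the low PAGE_CACHE_SHIFT bits the old prev_offset. A hedged sketch of that convention as hypothetical helpers (the patch itself open-codes these shifts at each call site; kernel types replaced by plain C, PAGE_CACHE_SHIFT again assumed to be 12):

	#define PAGE_CACHE_SHIFT 12	/* assumed: 4 KiB pages */
	#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

	/* Pack a page index and an in-page byte offset into one 64-bit position. */
	static inline long long pack_prev_pos(unsigned long index, unsigned int offset)
	{
		return ((long long)index << PAGE_CACHE_SHIFT) | offset;
	}

	/* Recover the old prev_index: the page the last read ended in. */
	static inline unsigned long prev_pos_index(long long pos)
	{
		return (unsigned long)(pos >> PAGE_CACHE_SHIFT);
	}

	/* Recover the old prev_offset: where in that page it ended. */
	static inline unsigned int prev_pos_offset(long long pos)
	{
		return (unsigned int)(pos & (PAGE_CACHE_SIZE - 1));
	}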
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_index;
-	prev_offset = ra.prev_offset;
+	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
@@ -966,7 +966,6 @@ void do_generic_mapping_read(struct address_space *mapping,
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
 		prev_offset = offset;
-		ra.prev_offset = offset;

 		page_cache_release(page);
 		if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ void do_generic_mapping_read(struct address_space *mapping,
 out:
 	*_ra = ra;
-	_ra->prev_index = prev_index;
+	_ra->prev_pos = prev_index;
+	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	_ra->prev_pos |= prev_offset;

-	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
 		page_cache_release(cached_page);
 	if (filp)
@@ -1396,7 +1397,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
-	ra->prev_index = page->index;
+	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
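In do_generic_mapping_read() the decode at entry and the three-statement re-encode at the out: label must round-trip losslessly; that is also why the per-iteration `ra.prev_offset = offset;` store could be dropped, since the local prev_offset is now written back exactly once on exit. A quick userspace check of that invariant (not from the commit; loff_t modeled as long long, PAGE_CACHE_SHIFT assumed 12):

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assumed: 4 KiB pages */
	#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

	int main(void)
	{
		long long prev_pos = ((long long)7 << PAGE_CACHE_SHIFT) | 123;

		/* decode, as at the top of do_generic_mapping_read() */
		unsigned long prev_index  = (unsigned long)(prev_pos >> PAGE_CACHE_SHIFT);
		unsigned int  prev_offset = (unsigned int)(prev_pos & (PAGE_CACHE_SIZE - 1));

		/* re-encode, as at the out: label */
		long long pos = prev_index;
		pos <<= PAGE_CACHE_SHIFT;
		pos |= prev_offset;

		assert(pos == prev_pos);	/* lossless round trip */
		printf("index=%lu offset=%u pos=%lld\n", prev_index, prev_offset, pos);
		return 0;
	}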
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator.  The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads.  Note that the readahead algorithm checks loosely
  * for sequential patterns.  Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int max;	/* max readahead pages */
-	int sequential;
-
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+	int	max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
+	int	sequential;

 	/*
 	 * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}

+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
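With prev_pos in place, ondemand_readahead() derives the previous page number on demand rather than reading prev_index directly. A standalone sketch of that sequential-read test (same logic as the two added lines above, kernel types replaced by plain C, PAGE_CACHE_SHIFT assumed 12):

	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assumed: 4 KiB pages */

	/* Mirror of the patched test: sequential if the read starts on the
	 * same page as the previous position (or the next one), or is a
	 * large request anyway. */
	static int is_sequential(long long prev_pos, unsigned long offset,
				 unsigned long req_size, unsigned long max)
	{
		unsigned long prev_offset = (unsigned long)(prev_pos >> PAGE_CACHE_SHIFT);

		return offset - prev_offset <= 1UL || req_size > max;
	}

	int main(void)
	{
		/* file_ra_state_init() sets prev_pos = -1: the right shift keeps
		 * it -1 (arithmetic shift, as gcc implements it), the conversion
		 * to unsigned wraps to ULONG_MAX, and the unsigned subtraction
		 * makes a first read at page 0 count as sequential. */
		printf("first read at page 0:   %d\n", is_sequential(-1, 0, 4, 32));
		printf("random read at page 1k: %d\n", is_sequential(-1, 1000, 4, 32));
		return 0;
	}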