Commit 042124cc authored by Matthew Wilcox (Oracle); committed by Linus Torvalds

mm: add new readahead_control API

Filesystems which implement the upcoming ->readahead method will get
their pages by calling readahead_page() or readahead_page_batch().
These functions support large pages, even though none of the filesystems
to be converted do yet.
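
As a sketch of the intended calling pattern (illustrative only; "myfs" and
myfs_read_page_async() are hypothetical, not part of this patch):

	static void myfs_readahead(struct readahead_control *rac)
	{
		struct page *page;

		while ((page = readahead_page(rac))) {
			myfs_read_page_async(rac->mapping->host, page);
			/* Drop our reference now that the I/O is queued;
			 * the completion handler unlocks the page. */
			put_page(page);
		}
	}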
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-6-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent ad4ae1c7
@@ -642,6 +642,146 @@ static inline int add_to_page_cache(struct page *page,
	return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};
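
/*
 * Illustration only (an assumption about call sites, not part of this
 * header): the readahead core owns the private fields, so it is expected
 * to build a request directly before handing it to a filesystem:
 *
 *	struct readahead_control rac = {
 *		.mapping = mapping,
 *		.file = file,
 *		._index = index,
 *	};
 */
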
/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = hpage_nr_pages(page);

	return page;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += hpage_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index. This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount of each page once it has been submitted for
 * I/O and unlock each page once all I/O to that page has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
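
/*
 * Example usage (hypothetical; myfs_submit_read() is an assumed helper,
 * not defined by this patch), consuming a request in batches with an
 * on-stack array sized automatically via ARRAY_SIZE():
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct page *pages[16];
 *		unsigned int nr, i;
 *
 *		while ((nr = readahead_page_batch(rac, pages))) {
 *			for (i = 0; i < nr; i++)
 *				myfs_submit_read(pages[i]);
 *		}
 *	}
 */
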
/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
	return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}
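
/*
 * Example (illustrative only, not part of this patch): the accessors
 * above let a filesystem size and place an I/O without touching the
 * private fields:
 *
 *	loff_t pos = readahead_pos(rac);	(byte offset of first page)
 *	loff_t len = readahead_length(rac);	(total bytes requested)
 *
 * A filesystem mapping the request onto a disk extent would read len
 * bytes starting at pos, covering readahead_count(rac) pages beginning
 * at page index readahead_index(rac).
 */
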
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
......