diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 22a20af4d785c2f5b251e6828ed73f429267ecfc..aa08fa8fd79b9c85bd3adaa68e22631b0f425e97 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -3,15 +3,9 @@
 
 #include <linux/swap.h>
 #include <linux/mempolicy.h>
+#include <linux/pagemap.h>
 #include <linux/percpu_counter.h>
 
-struct page;
-struct file;
-struct inode;
-struct super_block;
-struct user_struct;
-struct vm_area_struct;
-
 /* inode in-kernel data */
 
 #define SHMEM_NR_DIRECT 16
@@ -61,9 +55,18 @@ extern struct file *shmem_file_setup(const char *name,
 					loff_t size, unsigned long flags);
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+					pgoff_t index, gfp_t gfp_mask);
 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
 extern void mem_cgroup_get_shmem_target(struct inode *inode,
 					pgoff_t pgoff, struct page **pagep, swp_entry_t *ent);
 
+static inline struct page *shmem_read_mapping_page(
+				struct address_space *mapping, pgoff_t index)
+{
+	return shmem_read_mapping_page_gfp(mapping, index,
+					mapping_gfp_mask(mapping));
+}
+
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
index f1714758ea960f9fc8f7750674a38482ea6136f0..fcedf5464eb79dba02ad485681fcc67530bdb720 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3035,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
+
+/**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @gfp:	the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * suit tmpfs, since it may have pages in swapcache, and needs to find those
+ * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+					 pgoff_t index, gfp_t gfp)
+{
+	return read_cache_page_gfp(mapping, index, gfp);
+}
+EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
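
Not part of the patch above: a rough sketch of how a caller, in the style of the i915/ttm users mentioned in the kernel-doc comment, might use the new helper to pin the shmem-backed pages of an object. The pin_backing_pages() name and its error handling are hypothetical and only illustrate the intended calling convention.

/*
 * Hypothetical caller sketch, not from this patch: pin the shmem-backed
 * pages of an object one by one, letting shmem allocate any missing
 * pages from highmem.
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static int pin_backing_pages(struct address_space *mapping,
			     struct page **pages, pgoff_t nr_pages)
{
	/* Honour the mapping's own allocation constraints, allow highmem */
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_HIGHMEM;
	pgoff_t i;

	for (i = 0; i < nr_pages; i++) {
		/* Returns the page from page/swap cache, or allocates it */
		struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);

		if (IS_ERR(page)) {
			/* Drop the references already taken, then bail out */
			while (i--)
				page_cache_release(pages[i]);
			return PTR_ERR(page);
		}
		pages[i] = page;	/* caller releases with page_cache_release() */
	}
	return 0;
}

Callers that are content with the mapping's default allocation flags can instead use the shmem_read_mapping_page() wrapper added to the header, which simply passes mapping_gfp_mask(mapping).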