Commit fc0ecff6 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] remove invalidate_inode_pages()

Convert all calls to invalidate_inode_pages() into open-coded calls to
invalidate_mapping_pages().

Leave the invalidate_inode_pages() wrapper in place for now, marked as
deprecated.
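
For illustration only (a sketch drawn from the wrapper added in include/linux/fs.h, not an additional change in this patch): the deprecated wrapper forwards to invalidate_mapping_pages() over the whole file, so each caller is open-coded with an explicit 0..-1 range; -1 converts to ~0UL when passed as the pgoff_t end argument.

    /* Deprecated wrapper kept in include/linux/fs.h: */
    static inline unsigned long __deprecated
    invalidate_inode_pages(struct address_space *mapping)
    {
            return invalidate_mapping_pages(mapping, 0, ~0UL);
    }

    /* Call sites are converted from ... */
    invalidate_inode_pages(inode->i_mapping);
    /* ... to the equivalent open-coded form: */
    invalidate_mapping_pages(inode->i_mapping, 0, -1);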
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 54bc4855
@@ -666,7 +666,7 @@ static void bitmap_file_put(struct bitmap *bitmap)
         if (file) {
                 struct inode *inode = file->f_path.dentry->d_inode;
-                invalidate_inode_pages(inode->i_mapping);
+                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                 fput(file);
         }
 }
...
@@ -278,7 +278,8 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
         kfree(dev->mtd.name);
         if (dev->blkdev) {
-                invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
+                invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
+                                0, -1);
                 close_bdev_excl(dev->blkdev);
         }
...
@@ -1953,7 +1953,7 @@ static void invalidate_sub(struct lun *curlun)
         struct inode *inode = filp->f_path.dentry->d_inode;
         unsigned long rc;
-        rc = invalidate_inode_pages(inode->i_mapping);
+        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
         VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
 }
...
@@ -110,7 +110,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
         if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                 filemap_write_and_wait(inode->i_mapping);
-                invalidate_inode_pages(&inode->i_data);
+                invalidate_mapping_pages(&inode->i_data, 0, -1);
         }
         return res;
@@ -234,7 +234,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
                 total += result;
         } while (count);
         invalidate_inode_pages2(inode->i_mapping);
         return total;
 }
...
@@ -345,7 +345,7 @@ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
          * We really want to use invalidate_inode_pages2() for
          * that, but not until that's cleaned up.
          */
-        invalidate_inode_pages(mapping);
+        invalidate_mapping_pages(mapping, 0, -1);
 }
 /*
...
@@ -20,7 +20,7 @@ static void drop_pagecache_sb(struct super_block *sb)
         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                 if (inode->i_state & (I_FREEING|I_WILL_FREE))
                         continue;
-                invalidate_inode_pages(inode->i_mapping);
+                invalidate_mapping_pages(inode->i_mapping, 0, -1);
         }
         spin_unlock(&inode_lock);
 }
...
@@ -69,7 +69,7 @@ void fuse_finish_open(struct inode *inode, struct file *file,
         if (outarg->open_flags & FOPEN_DIRECT_IO)
                 file->f_op = &fuse_direct_io_file_operations;
         if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
-                invalidate_inode_pages(inode->i_mapping);
+                invalidate_mapping_pages(inode->i_mapping, 0, -1);
         ff->fh = outarg->fh;
         file->private_data = ff;
 }
...
@@ -112,7 +112,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
 {
         struct fuse_conn *fc = get_fuse_conn(inode);
         if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size)
-                invalidate_inode_pages(inode->i_mapping);
+                invalidate_mapping_pages(inode->i_mapping, 0, -1);
         inode->i_ino = attr->ino;
         inode->i_mode = (inode->i_mode & S_IFMT) + (attr->mode & 07777);
...
@@ -414,7 +414,8 @@ static void prune_icache(int nr_to_scan)
                         __iget(inode);
                         spin_unlock(&inode_lock);
                         if (remove_inode_buffers(inode))
-                                reap += invalidate_inode_pages(&inode->i_data);
+                                reap += invalidate_mapping_pages(&inode->i_data,
+                                                0, -1);
                         iput(inode);
                         spin_lock(&inode_lock);
...
@@ -296,7 +296,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
                 inode->i_blocks = (inode->i_size + 511) >> 9;
                 if (len) {
-                        invalidate_inode_pages(inode->i_mapping);
+                        invalidate_mapping_pages(inode->i_mapping, 0, -1);
                 }
         inode->i_ctime = CURRENT_TIME_SEC;
         inode->i_mtime = inode->i_ctime;
@@ -1518,7 +1518,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
         }
         inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
         mark_inode_dirty(inode);
-        invalidate_inode_pages(inode->i_mapping);
+        invalidate_mapping_pages(inode->i_mapping, 0, -1);
 out_isem:
         return err;
...
@@ -1574,7 +1574,7 @@ extern int invalidate_inodes(struct super_block *);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                         pgoff_t start, pgoff_t end);
-static inline unsigned long
+static inline unsigned long __deprecated
 invalidate_inode_pages(struct address_space *mapping)
 {
         return invalidate_mapping_pages(mapping, 0, ~0UL);
@@ -1584,7 +1584,7 @@ static inline void invalidate_remote_inode(struct inode *inode)
 {
         if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
             S_ISLNK(inode->i_mode))
-                invalidate_inode_pages(inode->i_mapping);
+                invalidate_mapping_pages(inode->i_mapping, 0, -1);
 }
 extern int invalidate_inode_pages2(struct address_space *mapping);
 extern int invalidate_inode_pages2_range(struct address_space *mapping,
...
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(cancel_dirty_page);
  *
  * We need to bale out if page->mapping is no longer equal to the original
  * mapping. This happens a) when the VM reclaimed the page while we waited on
- * its lock, b) when a concurrent invalidate_inode_pages got there first and
+ * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
 static void
@@ -106,7 +106,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 }
 /*
- * This is for invalidate_inode_pages(). That function can be called at
+ * This is for invalidate_mapping_pages(). That function can be called at
  * any time, and is not supposed to throw away dirty pages. But pages can
  * be marked dirty at any time too, so use remove_mapping which safely
  * discards clean, unused pages.
...