Commit f9a14399 authored by Peter Zijlstra, committed by Linus Torvalds

mm: optimize kill_bdev()

Remove duplicate work in kill_bdev().

It currently invalidates and then truncates the bdev's mapping.
invalidate_mapping_pages() will opportunistically remove pages from the
mapping.  And truncate_inode_pages() will forcefully remove all pages.

The only thing truncate doesn't do is flush the bh lrus, so do that explicitly.  This
avoids (very unlikely, but possible) invalid lookup results if the same bdev is
quickly re-issued.
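
For reference, here is kill_bdev() before and after the patch, reconstructed from the
diff below (2.6-era block_device/bd_inode layout assumed):

	/* Before: invalidate_bdev() walks the mapping, then truncate walks it again. */
	static void kill_bdev(struct block_device *bdev)
	{
		invalidate_bdev(bdev);
		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
	}

	/*
	 * After: bail out early if the mapping is empty, flush the per-CPU bh LRUs
	 * explicitly (truncate does not do this), then truncate the mapping once.
	 */
	static void kill_bdev(struct block_device *bdev)
	{
		if (bdev->bd_inode->i_mapping->nrpages == 0)
			return;
		invalidate_bh_lrus();
		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
	}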

The change also prevents the extreme kernel latencies observed when block devices
with a large amount of pagecache are unmounted, by avoiding
invalidate_mapping_pages() on that path.  invalidate_mapping_pages() has no
cond_resched() (it can be called under a spinlock), whereas truncate_inode_pages()
does.
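
A rough sketch of why that matters (simplified batching pattern only; the helper
names below are placeholders for illustration, not real kernel functions):

	/*
	 * Simplified sketch: truncate_inode_pages() releases pages a
	 * pagevec-sized batch at a time, with a scheduling point between
	 * batches, so truncating a huge bdev mapping cannot hog the CPU.
	 */
	while (mapping_has_pages(mapping)) {	/* placeholder check */
		release_one_batch(mapping);	/* placeholder: up to PAGEVEC_SIZE pages */
		cond_resched();			/* may yield the CPU between batches */
	}
	/*
	 * invalidate_mapping_pages() has no such cond_resched() because it may
	 * be called with a spinlock held, where sleeping is not allowed.
	 */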

[akpm@linux-foundation.org: restore nrpages==0 optimisation]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent f98393a6
@@ -55,10 +55,12 @@ static sector_t max_block(struct block_device *bdev)
 	return retval;
 }
 
-/* Kill _all_ buffers, dirty or not.. */
+/* Kill _all_ buffers and pagecache , dirty or not.. */
 static void kill_bdev(struct block_device *bdev)
 {
-	invalidate_bdev(bdev);
+	if (bdev->bd_inode->i_mapping->nrpages == 0)
+		return;
+	invalidate_bh_lrus();
 	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
 }
......
@@ -44,7 +44,6 @@
 #include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static void invalidate_bh_lrus(void);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
@@ -1403,7 +1402,7 @@ static void invalidate_bh_lru(void *arg)
 	put_cpu_var(bh_lrus);
 }
 
-static void invalidate_bh_lrus(void)
+void invalidate_bh_lrus(void)
 {
 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
......
@@ -182,6 +182,7 @@ void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, unsigned int size);
 struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+void invalidate_bh_lrus(void);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
......