Commit 16c4042f authored by Wu Fengguang, committed by Linus Torvalds

writeback: avoid unnecessary calculation of bdi dirty thresholds

Split get_dirty_limits() into global_dirty_limits() + bdi_dirty_limit(), so
that the latter can be avoided when under the global dirty background
threshold (which is the normal state for most systems).
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent e50e3720
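The effect of the split, as a minimal self-contained sketch (userspace C; the counter values, thresholds and the simplified bdi_dirty_limit() signature are invented stand-ins, not the kernel code): the cheap global limits are computed first, and the per-bdi math is skipped entirely in the common under-threshold case.

	#include <stdio.h>

	/* Invented stand-ins for the kernel's global page counters. */
	static unsigned long nr_reclaimable = 120;	/* dirty + unstable NFS */
	static unsigned long nr_writeback   = 30;

	/* Cheap: derives both thresholds from the dirty ratios alone. */
	static void global_dirty_limits(unsigned long *pbackground,
					unsigned long *pdirty)
	{
		*pbackground = 1000;	/* stand-in values */
		*pdirty      = 2000;
	}

	/* The part the patch makes skippable: per-bdi writeout-fraction math. */
	static unsigned long bdi_dirty_limit(unsigned long dirty)
	{
		printf("bdi_dirty_limit() computed\n");
		return dirty / 4;	/* stand-in for the fraction math */
	}

	int main(void)
	{
		unsigned long background_thresh, dirty_thresh, bdi_thresh;

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/* The common case: well under the limits, so the per-bdi
		 * calculation is never paid for at all. */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2) {
			printf("under background threshold, bdi math skipped\n");
			return 0;
		}

		bdi_thresh = bdi_dirty_limit(dirty_thresh);
		printf("throttling against bdi_thresh = %lu\n", bdi_thresh);
		return 0;
	}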
@@ -590,7 +590,7 @@ static inline bool over_bground_thresh(void)
 {
 	unsigned long background_thresh, dirty_thresh;
 
-	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+	global_dirty_limits(&background_thresh, &dirty_thresh);
 
 	return (global_page_state(NR_FILE_DIRTY) +
 		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
...
@@ -124,8 +124,9 @@ struct ctl_table;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
 				      void __user *, size_t *, loff_t *);
 
-void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-		      unsigned long *pbdi_dirty, struct backing_dev_info *bdi);
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+			      unsigned long dirty);
 
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
...
@@ -81,7 +81,8 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		nr_more_io++;
 	spin_unlock(&inode_lock);
 
-	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
+	global_dirty_limits(&background_thresh, &dirty_thresh);
+	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	seq_printf(m,
...
@@ -267,10 +267,11 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  *   dirty -= (dirty/8) * p_{t}
  */
-static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
+static unsigned long task_dirty_limit(struct task_struct *tsk,
+				      unsigned long bdi_dirty)
 {
 	long numerator, denominator;
-	unsigned long dirty = *pdirty;
+	unsigned long dirty = bdi_dirty;
 	u64 inv = dirty >> 3;
 
 	task_dirties_fraction(tsk, &numerator, &denominator);
@@ -278,10 +279,8 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
 	do_div(inv, denominator);
 
 	dirty -= inv;
-	if (dirty < *pdirty/2)
-		dirty = *pdirty/2;
 
-	*pdirty = dirty;
+	return max(dirty, bdi_dirty/2);
 }
 
 /*
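(Worked example with invented numbers: for bdi_dirty = 800 pages and a task responsible for half of the recent dirtying, p_t = 1/2, the reduction is inv = (800/8) * 1/2 = 50, so the task-adjusted limit is 750; a task doing all of the dirtying gets 800 - 100 = 700, and the max(dirty, bdi_dirty/2) clamp keeps the result from ever dropping below 400.)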
@@ -391,9 +390,7 @@ unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
-void
-get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
@@ -425,26 +422,28 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
 	}
 	*pbackground = background;
 	*pdirty = dirty;
+}
 
-	if (bdi) {
-		u64 bdi_dirty;
-		long numerator, denominator;
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+			      unsigned long dirty)
+{
+	u64 bdi_dirty;
+	long numerator, denominator;
 
-		/*
-		 * Calculate this BDI's share of the dirty ratio.
-		 */
-		bdi_writeout_fraction(bdi, &numerator, &denominator);
+	/*
+	 * Calculate this BDI's share of the dirty ratio.
+	 */
+	bdi_writeout_fraction(bdi, &numerator, &denominator);
 
-		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
-		bdi_dirty *= numerator;
-		do_div(bdi_dirty, denominator);
+	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+	bdi_dirty *= numerator;
+	do_div(bdi_dirty, denominator);
 
-		bdi_dirty += (dirty * bdi->min_ratio) / 100;
-		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
-			bdi_dirty = dirty * bdi->max_ratio / 100;
-
-		*pbdi_dirty = bdi_dirty;
-		task_dirty_limit(current, pbdi_dirty);
-	}
+	bdi_dirty += (dirty * bdi->min_ratio) / 100;
+	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+		bdi_dirty = dirty * bdi->max_ratio / 100;
+
+	return bdi_dirty;
 }
 
 /*
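(Worked example with invented numbers: for dirty = 2000 pages, bdi_min_ratio = 0, and a device that has done 1/4 of the recent writeout (numerator/denominator = 1/4) with bdi->min_ratio = 0 and bdi->max_ratio = 100, the device's share is 2000 * (100-0)/100 * 1/4 = 500 pages; the min_ratio term can only raise that floor, and the max_ratio check caps the result.)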
@@ -475,13 +474,24 @@ static void balance_dirty_pages(struct address_space *mapping,
 			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&background_thresh, &dirty_thresh,
-				 &bdi_thresh, bdi);
-
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_writeback = global_page_state(NR_WRITEBACK);
 
+		global_dirty_limits(&background_thresh, &dirty_thresh);
+
+		/*
+		 * Throttle it only when the background writeback cannot
+		 * catch-up. This avoids (excessively) small writeouts
+		 * when the bdi limits are ramping up.
+		 */
+		if (nr_reclaimable + nr_writeback <
+				(background_thresh + dirty_thresh) / 2)
+			break;
+
+		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+		bdi_thresh = task_dirty_limit(current, bdi_thresh);
+
 		/*
 		 * In order to avoid the stacked BDI deadlock we need
 		 * to ensure we accurately count the 'dirty' pages when
@@ -513,15 +523,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 		if (!dirty_exceeded)
 			break;
 
-		/*
-		 * Throttle it only when the background writeback cannot
-		 * catch-up. This avoids (excessively) small writeouts
-		 * when the bdi limits are ramping up.
-		 */
-		if (nr_reclaimable + nr_writeback <
-				(background_thresh + dirty_thresh) / 2)
-			break;
-
 		if (!bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;
@@ -634,7 +635,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 	unsigned long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+		global_dirty_limits(&background_thresh, &dirty_thresh);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
...