Commit 1edf2234 authored by Johannes Weiner, committed by Linus Torvalds

mm/page-writeback.c: make determine_dirtyable_memory static again

The tracing ring-buffer used this function briefly, but not anymore.
Make it local to the writeback code again.

Also, move the function so that no forward declaration needs to be
reintroduced.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent e4e11180
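For readers unfamiliar with the pattern the commit message relies on: `static` gives a function internal linkage, so it can only be referenced from its own translation unit, and defining it above its first caller means the compiler already knows its signature, so no separate forward declaration is needed. A minimal standalone sketch of the idea (the names and values here are illustrative, not kernel code):

	#include <stdio.h>

	/* Defined before its only caller, so no forward declaration is
	 * needed; `static` keeps the symbol private to this translation
	 * unit, mirroring the change made to determine_dirtyable_memory(). */
	static unsigned long dirtyable_pages(void)
	{
		return 1234;	/* placeholder value, for illustration only */
	}

	static void writeback_tick(void)
	{
		printf("dirtyable: %lu pages\n", dirtyable_pages());
	}

	int main(void)
	{
		writeback_tick();
		return 0;
	}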
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -138,8 +138,6 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
-extern unsigned long determine_dirtyable_memory(void);
-
 extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
 					  void __user *buffer, size_t *lenp,
 					  loff_t *ppos);
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -129,6 +129,66 @@ unsigned long global_dirty_limit;
  */
 static struct prop_descriptor vm_completions;
 
+/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around.  To avoid stressing page reclaim with lots of unreclaimable
+ * pages.  It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_node_state(node, N_HIGH_MEMORY) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+
+	if (!vm_highmem_is_dirtyable)
+		x -= highmem_dirtyable_memory(x);
+
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -196,7 +256,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
@@ -291,67 +350,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around.  To avoid stressing page reclaim with lots of unreclaimable
- * pages.  It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-	int node;
-	unsigned long x = 0;
-
-	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
-	}
-	/*
-	 * Make sure that the number of highmem pages is never larger
-	 * than the number of the total dirtyable memory. This can only
-	 * occur in very strange VM situations but we want to make sure
-	 * that this does not occur.
-	 */
-	return min(x, total);
-#else
-	return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the number of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-	unsigned long x;
-
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-	if (!vm_highmem_is_dirtyable)
-		x -= highmem_dirtyable_memory(x);
-
-	return x + 1;	/* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 					   unsigned long bg_thresh)
 {
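To make the arithmetic of the moved function concrete: it sums free and reclaimable pages, optionally subtracts the highmem share (capped at the running total, as the `min(x, total)` in `highmem_dirtyable_memory()` guarantees), and adds one so the result is never zero. A hedged userspace-only sketch with made-up page counts; the macros and the `vm_highmem_is_dirtyable` default below are illustrative assumptions, not values read from a running kernel:

	#include <stdio.h>

	/* Hypothetical page counts, chosen only to illustrate the arithmetic. */
	#define FREE_PAGES        100000UL
	#define RECLAIMABLE_PAGES  50000UL
	#define HIGHMEM_PAGES      30000UL

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		int vm_highmem_is_dirtyable = 0;	/* assumed default */
		unsigned long x = FREE_PAGES + RECLAIMABLE_PAGES;

		if (!vm_highmem_is_dirtyable)
			x -= min_ul(HIGHMEM_PAGES, x);	/* mirrors the min(x, total) cap */

		/* +1 ensures a nonzero result, as the kernel comment notes */
		printf("dirtyable memory: %lu pages\n", x + 1);
		return 0;
	}

With these numbers the result is 120001 pages: 150000 free-plus-reclaimable, minus 30000 highmem, plus the final one-page floor.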