提交 9d11ea9f 编写于 作者: Johannes Weiner 提交者: Linus Torvalds

memcg: simplify the way memory limits are checked

Since transparent huge pages, checking whether memory cgroups are below
their limits is no longer enough, but the actual amount of chargeable
space is important.

To not have more than one limit-checking interface, replace
memory_cgroup_check_under_limit() and memory_cgroup_check_margin() with a
single memory_cgroup_margin() that returns the chargeable space and leaves
the comparison to the callsite.

Soft limits are now checked the other way round, by using the already
existing function that returns the amount by which soft limits are
exceeded: res_counter_soft_limit_excess().

Also remove all the corresponding functions on the res_counter side that
are now no longer used.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 b7c61678
...@@ -129,20 +129,22 @@ int __must_check res_counter_charge(struct res_counter *counter, ...@@ -129,20 +129,22 @@ int __must_check res_counter_charge(struct res_counter *counter,
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val); void res_counter_uncharge(struct res_counter *counter, unsigned long val);
static inline bool res_counter_limit_check_locked(struct res_counter *cnt) /**
{ * res_counter_margin - calculate chargeable space of a counter
if (cnt->usage < cnt->limit) * @cnt: the counter
return true; *
* Returns the difference between the hard limit and the current usage
return false; * of resource counter @cnt.
} */
static inline unsigned long long res_counter_margin(struct res_counter *cnt)
static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
{ {
if (cnt->usage <= cnt->soft_limit) unsigned long long margin;
return true; unsigned long flags;
return false; spin_lock_irqsave(&cnt->lock, flags);
margin = cnt->limit - cnt->usage;
spin_unlock_irqrestore(&cnt->lock, flags);
return margin;
} }
/** /**
...@@ -167,52 +169,6 @@ res_counter_soft_limit_excess(struct res_counter *cnt) ...@@ -167,52 +169,6 @@ res_counter_soft_limit_excess(struct res_counter *cnt)
return excess; return excess;
} }
/*
 * Helper function to detect if the cgroup is within its hard limit or
 * not. It's currently called from cgroup_rss_prepare()
 */
static inline bool res_counter_check_under_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	/* Lock the counter so usage and limit are compared atomically. */
	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
/**
 * res_counter_check_margin - check if the counter allows charging
 * @cnt: the resource counter to check
 * @bytes: the number of bytes to check the remaining space against
 *
 * Returns true when at least @bytes of chargeable space remain below
 * the hard limit of @cnt, false when the charge would exceed it.
 */
static inline bool res_counter_check_margin(struct res_counter *cnt,
						unsigned long bytes)
{
	unsigned long flags;
	bool enough_room;

	spin_lock_irqsave(&cnt->lock, flags);
	enough_room = (cnt->limit - cnt->usage >= bytes);
	spin_unlock_irqrestore(&cnt->lock, flags);

	return enough_room;
}
/*
 * res_counter_check_within_soft_limit - test whether @cnt is at or
 * below its soft limit.  Takes the counter lock for a consistent read.
 */
static inline bool res_counter_check_within_soft_limit(struct res_counter *cnt)
{
	unsigned long flags;
	bool within;

	spin_lock_irqsave(&cnt->lock, flags);
	within = res_counter_soft_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);

	return within;
}
static inline void res_counter_reset_max(struct res_counter *cnt) static inline void res_counter_reset_max(struct res_counter *cnt)
{ {
unsigned long flags; unsigned long flags;
......
...@@ -504,11 +504,6 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem) ...@@ -504,11 +504,6 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
} }
} }
/*
 * mem_cgroup_get_excess - number of pages by which @mem exceeds its
 * soft limit (0 when at or below it); converts the byte value from
 * res_counter_soft_limit_excess() to pages via PAGE_SHIFT.
 */
static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
{
	return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
}
static struct mem_cgroup_per_zone * static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{ {
...@@ -1127,33 +1122,21 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, ...@@ -1127,33 +1122,21 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
#define mem_cgroup_from_res_counter(counter, member) \ #define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member) container_of(counter, struct mem_cgroup, member)
/*
 * mem_cgroup_check_under_limit - true when the group is below its
 * memory limit and, if swap accounting is enabled, also below its
 * memory+swap limit.
 */
static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
	if (!res_counter_check_under_limit(&mem->res))
		return false;
	/* memsw only constrains the charge when swap accounting is on. */
	if (do_swap_account && !res_counter_check_under_limit(&mem->memsw))
		return false;
	return true;
}
/** /**
* mem_cgroup_check_margin - check if the memory cgroup allows charging * mem_cgroup_margin - calculate chargeable space of a memory cgroup
* @mem: memory cgroup to check * @mem: the memory cgroup
* @bytes: the number of bytes the caller intends to charge
* *
* Returns a boolean value on whether @mem can be charged @bytes or * Returns the maximum amount of memory @mem can be charged with, in
* whether this would exceed the limit. * bytes.
*/ */
static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes) static unsigned long long mem_cgroup_margin(struct mem_cgroup *mem)
{ {
if (!res_counter_check_margin(&mem->res, bytes)) unsigned long long margin;
return false;
if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes)) margin = res_counter_margin(&mem->res);
return false; if (do_swap_account)
return true; margin = min(margin, res_counter_margin(&mem->memsw));
return margin;
} }
static unsigned int get_swappiness(struct mem_cgroup *memcg) static unsigned int get_swappiness(struct mem_cgroup *memcg)
...@@ -1420,7 +1403,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, ...@@ -1420,7 +1403,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
unsigned long excess = mem_cgroup_get_excess(root_mem); unsigned long excess;
excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
/* If memsw_is_minimum==1, swap-out is of-no-use. */ /* If memsw_is_minimum==1, swap-out is of-no-use. */
if (root_mem->memsw_is_minimum) if (root_mem->memsw_is_minimum)
...@@ -1477,9 +1462,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, ...@@ -1477,9 +1462,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
return ret; return ret;
total += ret; total += ret;
if (check_soft) { if (check_soft) {
if (res_counter_check_within_soft_limit(&root_mem->res)) if (!res_counter_soft_limit_excess(&root_mem->res))
return total; return total;
} else if (mem_cgroup_check_under_limit(root_mem)) } else if (mem_cgroup_margin(root_mem))
return 1 + total; return 1 + total;
} }
return total; return total;
...@@ -1898,7 +1883,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, ...@@ -1898,7 +1883,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
gfp_mask, flags); gfp_mask, flags);
if (mem_cgroup_check_margin(mem_over_limit, csize)) if (mem_cgroup_margin(mem_over_limit) >= csize)
return CHARGE_RETRY; return CHARGE_RETRY;
/* /*
* Even though the limit is exceeded at this point, reclaim * Even though the limit is exceeded at this point, reclaim
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册