Commit 6d3d6aa2 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: remove synchronous stock draining code

With charge reparenting, the last synchronous stock drainer left.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent b2052564
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
 	disarm_kmem_keys(memcg);
 }
 
-static void drain_all_stock_async(struct mem_cgroup *memcg);
-
 static struct mem_cgroup_per_zone *
 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 {
@@ -2302,13 +2300,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 /*
  * Drains all per-CPU charge caches for given root_memcg resp. subtree
- * of the hierarchy under it. sync flag says whether we should block
- * until the work is done.
+ * of the hierarchy under it.
  */
-static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
+static void drain_all_stock(struct mem_cgroup *root_memcg)
 {
 	int cpu, curcpu;
 
+	/* If someone's already draining, avoid adding running more workers. */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
 	curcpu = get_cpu();
@@ -2329,41 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 		}
 	}
 	put_cpu();
-
-	if (!sync)
-		goto out;
-
-	for_each_online_cpu(cpu) {
-		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-			flush_work(&stock->work);
-	}
-out:
 	put_online_cpus();
-}
-
-/*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back later but cannot wait for it.
- */
-static void drain_all_stock_async(struct mem_cgroup *root_memcg)
-{
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
-	drain_all_stock(root_memcg, false);
-	mutex_unlock(&percpu_charge_mutex);
-}
-
-/* This is a synchronous drain interface. */
-static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
-{
-	/* called when force_empty is called */
-	mutex_lock(&percpu_charge_mutex);
-	drain_all_stock(root_memcg, true);
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -2472,7 +2438,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		goto retry;
 
 	if (!drained) {
-		drain_all_stock_async(mem_over_limit);
+		drain_all_stock(mem_over_limit);
 		drained = true;
 		goto retry;
 	}
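To see the resulting shape outside the kernel tree, here is a minimal userspace sketch of the drain path as it reads after this commit: a single drain_all_stock() entry point that bails out via trylock when a drain is already in flight, fires one detached worker per CPU, and never waits for completion. Everything in it (stock_t, NR_CPUS, drain_one) is an illustrative stand-in, not a kernel or libc API; the real code uses percpu_charge_mutex, test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags) and schedule_work_on().

/*
 * Userspace analogue of the post-commit drain path.
 * stock_t, NR_CPUS and drain_one() are illustrative stand-ins.
 * Build with: cc -pthread drain_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NR_CPUS 4

typedef struct {
	int nr_pages;             /* cached charge, like memcg_stock_pcp */
	atomic_bool flushing;     /* mirrors FLUSHING_CACHED_CHARGE */
} stock_t;

static stock_t stocks[NR_CPUS];
static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the per-cpu work item that drains one cache. */
static void *drain_one(void *arg)
{
	stock_t *stock = arg;

	stock->nr_pages = 0;      /* give the cached pages back */
	atomic_store(&stock->flushing, false);
	return NULL;
}

/*
 * One entry point, always asynchronous: kick per-CPU drains and return
 * without waiting, the shape drain_all_stock() has after this patch.
 */
static void drain_all_stock(void)
{
	/* If someone's already draining, don't pile up more workers. */
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		stock_t *stock = &stocks[cpu];
		pthread_t worker;

		/* Skip empty caches and ones already being flushed. */
		if (stock->nr_pages == 0 ||
		    atomic_exchange(&stock->flushing, true))
			continue;

		/* schedule_work_on() stand-in: fire and forget. */
		if (pthread_create(&worker, NULL, drain_one, stock) == 0)
			pthread_detach(worker);
	}

	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		stocks[cpu].nr_pages = 32;

	drain_all_stock();        /* returns right away; workers drain behind us */

	/* crude settle so the detached workers get to run before we exit */
	struct timespec ts = { 0, 50 * 1000 * 1000 };
	nanosleep(&ts, NULL);

	printf("cpu0 stock after drain: %d pages\n", stocks[0].nr_pages);
	return 0;
}

The point of the commit is visible in the shape alone: with no sync mode left, the trylock guard that used to live in drain_all_stock_async() moves into drain_all_stock() itself, and the flush_work() wait loop disappears.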