Commit f53d7ce3 — authored by Johannes Weiner, committed by Linus Torvalds

mm: memcg: shorten preempt-disabled section around event checks

Only the ratelimit checks themselves have to run with preemption
disabled, the resulting actions - checking for usage thresholds,
updating the soft limit tree - can and should run with preemption
enabled.
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reported-by: Yong Zhang <yong.zhang0@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-by: Luis Henriques <henrix@camandro.org>
Tested-by: Luis Henriques <henrix@camandro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: e94c8a9c
...@@ -748,37 +748,32 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, ...@@ -748,37 +748,32 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
return total; return total;
} }
static bool __memcg_event_check(struct mem_cgroup *memcg, int target) static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
enum mem_cgroup_events_target target)
{ {
unsigned long val, next; unsigned long val, next;
val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]); val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
next = __this_cpu_read(memcg->stat->targets[target]); next = __this_cpu_read(memcg->stat->targets[target]);
/* from time_after() in jiffies.h */ /* from time_after() in jiffies.h */
return ((long)next - (long)val < 0); if ((long)next - (long)val < 0) {
} switch (target) {
case MEM_CGROUP_TARGET_THRESH:
static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target) next = val + THRESHOLDS_EVENTS_TARGET;
{ break;
unsigned long val, next; case MEM_CGROUP_TARGET_SOFTLIMIT:
next = val + SOFTLIMIT_EVENTS_TARGET;
val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]); break;
case MEM_CGROUP_TARGET_NUMAINFO:
switch (target) { next = val + NUMAINFO_EVENTS_TARGET;
case MEM_CGROUP_TARGET_THRESH: break;
next = val + THRESHOLDS_EVENTS_TARGET; default:
break; break;
case MEM_CGROUP_TARGET_SOFTLIMIT: }
next = val + SOFTLIMIT_EVENTS_TARGET; __this_cpu_write(memcg->stat->targets[target], next);
break; return true;
case MEM_CGROUP_TARGET_NUMAINFO:
next = val + NUMAINFO_EVENTS_TARGET;
break;
default:
return;
} }
return false;
__this_cpu_write(memcg->stat->targets[target], next);
} }
/* /*
...@@ -789,25 +784,27 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) ...@@ -789,25 +784,27 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{ {
preempt_disable(); preempt_disable();
/* threshold event is triggered in finer grain than soft limit */ /* threshold event is triggered in finer grain than soft limit */
if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) { if (unlikely(mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_THRESH))) {
bool do_softlimit, do_numainfo;
do_softlimit = mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
do_numainfo = mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_NUMAINFO);
#endif
preempt_enable();
mem_cgroup_threshold(memcg); mem_cgroup_threshold(memcg);
__mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH); if (unlikely(do_softlimit))
if (unlikely(__memcg_event_check(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT))) {
mem_cgroup_update_tree(memcg, page); mem_cgroup_update_tree(memcg, page);
__mem_cgroup_target_update(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
}
#if MAX_NUMNODES > 1 #if MAX_NUMNODES > 1
if (unlikely(__memcg_event_check(memcg, if (unlikely(do_numainfo))
MEM_CGROUP_TARGET_NUMAINFO))) {
atomic_inc(&memcg->numainfo_events); atomic_inc(&memcg->numainfo_events);
__mem_cgroup_target_update(memcg,
MEM_CGROUP_TARGET_NUMAINFO);
}
#endif #endif
} } else
preempt_enable(); preempt_enable();
} }
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册