提交 78a9c546 编写于 作者: S Srikar Dronamraju 提交者: Ingo Molnar

sched/numa: Rename numabalancing_enabled to sched_numa_balancing

Simple rename of the 'numabalancing_enabled' variable to 'sched_numa_balancing'.
No functional changes.
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1439290813-6683-2-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 c5afb6a8
@@ -2124,11 +2124,11 @@ void set_numabalancing_state(bool enabled)
 		sched_feat_set("NO_NUMA");
 }
 #else
-__read_mostly bool numabalancing_enabled;
+__read_mostly bool sched_numa_balancing;

 void set_numabalancing_state(bool enabled)
 {
-	numabalancing_enabled = enabled;
+	sched_numa_balancing = enabled;
 }
 #endif /* CONFIG_SCHED_DEBUG */
@@ -2138,7 +2138,7 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 {
 	struct ctl_table t;
 	int err;
-	int state = numabalancing_enabled;
+	int state = sched_numa_balancing;

 	if (write && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
......
@@ -2069,7 +2069,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 	int local = !!(flags & TNF_FAULT_LOCAL);
 	int priv;

-	if (!numabalancing_enabled)
+	if (!sched_numa_balancing)
 		return;

 	/* for example, ksmd faulting in a user's mm */
@@ -7874,7 +7874,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		entity_tick(cfs_rq, se, queued);
 	}

-	if (numabalancing_enabled)
+	if (sched_numa_balancing)
 		task_tick_numa(rq, curr);
 }
......
@@ -1006,13 +1006,13 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #ifdef CONFIG_NUMA_BALANCING
 #define sched_feat_numa(x) sched_feat(x)
 #ifdef CONFIG_SCHED_DEBUG
-#define numabalancing_enabled sched_feat_numa(NUMA)
+#define sched_numa_balancing sched_feat_numa(NUMA)
 #else
-extern bool numabalancing_enabled;
+extern bool sched_numa_balancing;
 #endif /* CONFIG_SCHED_DEBUG */
 #else
 #define sched_feat_numa(x) (0)
-#define numabalancing_enabled (0)
+#define sched_numa_balancing (0)
 #endif /* CONFIG_NUMA_BALANCING */

 static inline u64 global_rt_period(void)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册