openanolis / cloud-kernel
Commit 53c5f5ba
Authored on Jun 13, 2010 by Tejun Heo

Merge branch 'sched-wq' of ../wq into cmwq-base

Parents: 7e27d6e7, 21aa9af0

Showing 9 changed files with 203 additions and 81 deletions (+203, -81)
include/linux/cpu.h          +25   -0
include/linux/cpuset.h        +6   -0
include/linux/perf_event.h    +1   -1
include/linux/sched.h         +1   -0
kernel/cpu.c                  +0   -6
kernel/cpuset.c               +2  -19
kernel/fork.c                 +1   -1
kernel/sched.c              +151  -54
kernel/workqueue_sched.h     +16   -0
include/linux/cpu.h
@@ -48,6 +48,31 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
+/*
+ * CPU notifier priorities.
+ */
+enum {
+	/*
+	 * SCHED_ACTIVE marks a cpu which is coming up active during
+	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
+	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
+	 * cpu_active mask right after SCHED_ACTIVE.  During
+	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
+	 * ordered in the similar way.
+	 *
+	 * This ordering guarantees consistent cpu_active mask and
+	 * migration behavior to all cpu notifiers.
+	 */
+	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
+	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
+	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
+	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
+
+	/* migration should happen before other stuff but after perf */
+	CPU_PRI_PERF		= 20,
+	CPU_PRI_MIGRATION	= 10,
+};
+
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
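These CPU_PRI_* values matter because the CPU notifier chain invokes callbacks in descending priority order, so the scheduler and cpuset callbacks bracket everything else. As a rough illustration (not part of this commit; example_cpu_callback and example_init are invented names), a subsystem registering at the default priority 0 can rely on the ordering like this:

/* Illustration only: a hypothetical hotplug callback at default priority 0. */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/*
		 * Runs after CPU_PRI_SCHED_ACTIVE / CPU_PRI_CPUSET_ACTIVE,
		 * so (long)hcpu is already in cpu_active_mask and the
		 * sched domains have been rebuilt.
		 */
		return NOTIFY_OK;
	case CPU_DOWN_PREPARE:
		/*
		 * Priority 0 is above CPU_PRI_SCHED_INACTIVE (INT_MIN + 1),
		 * so this still sees the CPU as active; deactivation happens
		 * at the tail of the chain.
		 */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __init example_init(void)
{
	hotcpu_notifier(example_cpu_callback, 0);	/* default priority */
	return 0;
}
early_initcall(example_init);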
include/linux/cpuset.h
@@ -20,6 +20,7 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_update_active_cpus(void)
+{
+	partition_sched_domains(1, NULL, NULL);
+}
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
include/linux/perf_event.h
@@ -1068,7 +1068,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_cpu_notifier(fn)					\
 do {								\
 	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = 20 };	\
+		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
 		(void *)(unsigned long)smp_processor_id());	\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
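The only change here is that the hard-coded priority 20 now comes from CPU_PRI_PERF; perf_cpu_notifier() still registers the callback and replays the UP_PREPARE/STARTING/ONLINE events for the boot CPU. A hedged usage sketch (my_pmu_cpu_notify and my_pmu_init are made-up names, not kernel symbols):

/* Illustration only: how a PMU driver typically hooks CPU hotplug. */
static int __cpuinit my_pmu_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu PMU state for (long)hcpu */
		break;
	case CPU_ONLINE:
		/* runs at CPU_PRI_PERF (20), i.e. before migration (10) */
		break;
	case CPU_DEAD:
		/* free per-cpu PMU state for (long)hcpu */
		break;
	}
	return NOTIFY_OK;
}

static int __init my_pmu_init(void)
{
	perf_cpu_notifier(my_pmu_cpu_notify);
	return 0;
}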
include/linux/sched.h
@@ -1696,6 +1696,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
kernel/cpu.c
@@ -235,11 +235,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	set_cpu_active(cpu, false);
 	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (err) {
-		set_cpu_active(cpu, true);
-
 		nr_calls--;
 		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
@@ -249,7 +246,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
-		set_cpu_active(cpu, true);
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
@@ -321,8 +317,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
-	set_cpu_active(cpu, true);
-
 	/* Now call notifier in preparation. */
 	cpu_notify(CPU_ONLINE | mod, hcpu);
kernel/cpuset.c
@@ -2113,31 +2113,17 @@ static void scan_for_empty_cpusets(struct cpuset *root)
  * but making no active use of cpusets.
  *
  * This routine ensures that top_cpuset.cpus_allowed tracks
- * cpu_online_map on each CPU hotplug (cpuhp) event.
+ * cpu_active_mask on each CPU hotplug (cpuhp) event.
  *
  * Called within get_online_cpus().  Needs to call cgroup_lock()
  * before calling generate_sched_domains().
  */
-static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
-				unsigned long phase, void *unused_cpu)
+void __cpuexit cpuset_update_active_cpus(void)
 {
 	struct sched_domain_attr *attr;
 	cpumask_var_t *doms;
 	int ndoms;
 
-	switch (phase) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
 	cgroup_lock();
 	mutex_lock(&callback_mutex);
 	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
@@ -2148,8 +2134,6 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 	/* Have scheduler rebuild the domains */
 	partition_sched_domains(ndoms, doms, attr);
-
-	return NOTIFY_OK;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -2203,7 +2187,6 @@ void __init cpuset_init_smp(void)
 	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
-	hotcpu_notifier(cpuset_track_online_cpus, 0);
 	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
 
 	cpuset_wq = create_singlethread_workqueue("cpuset");
kernel/fork.c
@@ -907,7 +907,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
 
-	new_flags &= ~PF_SUPERPRIV;
+	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
kernel/sched.c
@@ -77,6 +77,7 @@
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
+#include "workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -2267,11 +2268,55 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
-/***
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+				 bool is_sync, bool is_migrate, bool is_local,
+				 unsigned long en_flags)
+{
+	schedstat_inc(p, se.statistics.nr_wakeups);
+	if (is_sync)
+		schedstat_inc(p, se.statistics.nr_wakeups_sync);
+	if (is_migrate)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+	if (is_local)
+		schedstat_inc(p, se.statistics.nr_wakeups_local);
+	else
+		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+	activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+					int wake_flags, bool success)
+{
+	trace_sched_wakeup(p, success);
+	check_preempt_curr(rq, p, wake_flags);
+
+	p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+	if (p->sched_class->task_woken)
+		p->sched_class->task_woken(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
+#endif
+	/* if a worker is waking up, notify workqueue */
+	if ((p->flags & PF_WQ_WORKER) && success)
+		wq_worker_waking_up(p, cpu_of(rq));
+}
+
+/**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2279,7 +2324,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
 			  int wake_flags)
@@ -2359,38 +2405,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 out_activate:
 #endif /* CONFIG_SMP */
-	schedstat_inc(p, se.statistics.nr_wakeups);
-	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.statistics.nr_wakeups_sync);
-	if (orig_cpu != cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-	if (cpu == this_cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_local);
-	else
-		schedstat_inc(p, se.statistics.nr_wakeups_remote);
-	activate_task(rq, p, en_flags);
+	ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+		      cpu == this_cpu, en_flags);
 	success = 1;
-
 out_running:
-	trace_sched_wakeup(p, success);
-	check_preempt_curr(rq, p, wake_flags);
-
-	p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-	if (p->sched_class->task_woken)
-		p->sched_class->task_woken(rq, p);
-
-	if (unlikely(rq->idle_stamp)) {
-		u64 delta = rq->clock - rq->idle_stamp;
-		u64 max = 2*sysctl_sched_migration_cost;
-
-		if (delta > max)
-			rq->avg_idle = max;
-		else
-			update_avg(&rq->avg_idle, delta);
-		rq->idle_stamp = 0;
-	}
-#endif
+	ttwu_post_activation(p, rq, wake_flags, success);
 out:
 	task_rq_unlock(rq, &flags);
 	put_cpu();
@@ -2398,6 +2417,37 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	return success;
 }
 
+/**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not alredy there.  The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task.  this_rq() stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+	bool success = false;
+
+	BUG_ON(rq != this_rq());
+	BUG_ON(p == current);
+	lockdep_assert_held(&rq->lock);
+
+	if (!(p->state & TASK_NORMAL))
+		return;
+
+	if (!p->se.on_rq) {
+		if (likely(!task_running(rq, p))) {
+			schedstat_inc(rq, ttwu_count);
+			schedstat_inc(rq, ttwu_local);
+		}
+		ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
+		success = true;
+	}
+	ttwu_post_activation(p, rq, 0, success);
+}
+
 /**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
@@ -3603,10 +3653,24 @@ asmlinkage void __sched schedule(void)
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely(signal_pending_state(prev->state, prev)))
+		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
-		else
+		} else {
+			/*
+			 * If a worker is going to sleep, notify and
+			 * ask workqueue whether it wants to wake up a
+			 * task to maintain concurrency.  If so, wake
+			 * up the task.
+			 */
+			if (prev->flags & PF_WQ_WORKER) {
+				struct task_struct *to_wakeup;
+
+				to_wakeup = wq_worker_sleeping(prev, cpu);
+				if (to_wakeup)
+					try_to_wake_up_local(to_wakeup);
+			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+		}
 		switch_count = &prev->nvcsw;
 	}
@@ -5801,20 +5865,49 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
-	.priority = 10
+	.priority = CPU_PRI_MIGRATION,
 };
 
+static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+				      unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		set_cpu_active((long)hcpu, true);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		set_cpu_active((long)hcpu, false);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
 static int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	/* Start one for the boot CPU: */
+	/* Initialize migration for the boot CPU */
 	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
 	BUG_ON(err == NOTIFY_BAD);
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
 
+	/* Register cpu active notifiers */
+	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
 	return 0;
 }
 early_initcall(migration_init);
@@ -7273,29 +7366,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-#ifndef CONFIG_CPUSETS
 /*
- * Add online and remove offline CPUs from the scheduler domains.
- * When cpusets are enabled they take over this function.
+ * Update cpusets according to cpu_active mask.  If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
  */
-static int update_sched_domains(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb,
+				       unsigned long action, void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		partition_sched_domains(1, NULL, NULL);
+		cpuset_update_active_cpus();
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static int __cpuexit cpuset_cpu_inactive(struct notifier_block *nfb,
+					 unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		cpuset_update_active_cpus();
 		return NOTIFY_OK;
-
 	default:
 		return NOTIFY_DONE;
 	}
 }
-#endif
 
 static int update_runtime(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
@@ -7341,10 +7440,8 @@ void __init sched_init_smp(void)
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
-#ifndef CONFIG_CPUSETS
-	/* XXX: Theoretical race here - CPU may be hotplugged now */
-	hotcpu_notifier(update_sched_domains, 0);
-#endif
+	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
 	/* RT runtime code needs to handle some hotplug events */
 	hotcpu_notifier(update_runtime, 0);
kernel/workqueue_sched.h (new file, mode 100644)
+/*
+ * kernel/workqueue_sched.h
+ *
+ * Scheduler hooks for concurrency managed workqueue.  Only to be
+ * included from sched.c and workqueue.c.
+ */
+static inline void wq_worker_waking_up(struct task_struct *task,
+				       unsigned int cpu)
+{
+}
+
+static inline struct task_struct *wq_worker_sleeping(struct task_struct *task,
+						      unsigned int cpu)
+{
+	return NULL;
+}
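This merge only introduces the hook points; both functions are deliberately no-ops until the concurrency-managed workqueue code lands on top of this base. As a rough sketch of the contract the scheduler side relies on (everything below is a hypothetical illustration, not code from this commit; example_nr_running and example_pick_idle_worker are invented names), a real implementation would track runnable workers per CPU and hand back another worker to wake when the last one blocks:

/* Hypothetical sketch of non-stub versions of the two hooks. */
static DEFINE_PER_CPU(atomic_t, example_nr_running);

/* Invented helper: return an idle worker bound to @cpu, or NULL if none. */
static struct task_struct *example_pick_idle_worker(unsigned int cpu)
{
	return NULL;		/* placeholder for this sketch */
}

/* Called from ttwu_post_activation() when a PF_WQ_WORKER task wakes up. */
static void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	atomic_inc(&per_cpu(example_nr_running, cpu));
}

/*
 * Called from schedule() when a PF_WQ_WORKER task blocks.  If this was the
 * last runnable worker on @cpu, return another worker for the scheduler to
 * wake via try_to_wake_up_local(), so the CPU stays supplied with work.
 */
static struct task_struct *wq_worker_sleeping(struct task_struct *task,
					      unsigned int cpu)
{
	if (atomic_dec_and_test(&per_cpu(example_nr_running, cpu)))
		return example_pick_idle_worker(cpu);
	return NULL;
}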