openanolis / cloud-kernel
Commit 4e28ec3d
Authored Dec 01, 2016 by Rafael J. Wysocki
Merge back earlier cpuidle material for v4.10.
Parents: a2c1bc64 6af33995
Showing 13 changed files with 143 additions and 81 deletions (+143 -81)
MAINTAINERS                          +3   -0
drivers/cpuidle/cpuidle-powernv.c    +1   -1
drivers/cpuidle/cpuidle.c            +12  -1
drivers/cpuidle/dt_idle_states.c     +6   -0
drivers/cpuidle/governor.c           +0   -4
drivers/cpuidle/governors/ladder.c   +0   -2
drivers/cpuidle/governors/menu.c     +0   -2
include/linux/cpu.h                  +2   -0
include/linux/cpuidle.h              +6   -3
include/linux/sched.h                +2   -1
kernel/fork.c                        +1   -1
kernel/sched/core.c                  +1   -0
kernel/sched/idle.c                  +109 -66
MAINTAINERS
@@ -3380,6 +3380,7 @@ M:	Daniel Lezcano <daniel.lezcano@linaro.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+B:	https://bugzilla.kernel.org
 F:	drivers/cpuidle/*
 F:	include/linux/cpuidle.h
@@ -6289,9 +6290,11 @@ S:	Maintained
 F:	drivers/platform/x86/intel-vbtn.c
 
 INTEL IDLE DRIVER
+M:	Jacob Pan <jacob.jun.pan@linux.intel.com>
 M:	Len Brown <lenb@kernel.org>
 L:	linux-pm@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
+B:	https://bugzilla.kernel.org
 S:	Supported
 F:	drivers/idle/intel_idle.c
drivers/cpuidle/cpuidle-powernv.c
@@ -22,7 +22,7 @@
 
 #define POWERNV_THRESHOLD_LATENCY_NS 200000
 
-struct cpuidle_driver powernv_idle_driver = {
+static struct cpuidle_driver powernv_idle_driver = {
 	.name = "powernv_idle",
 	.owner = THIS_MODULE,
 };
drivers/cpuidle/cpuidle.c
@@ -97,7 +97,17 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 	return ret;
 }
 
-#ifdef CONFIG_SUSPEND
+/* Set the current cpu to use the deepest idle state, override governors */
+void cpuidle_use_deepest_state(bool enable)
+{
+	struct cpuidle_device *dev;
+
+	preempt_disable();
+	dev = cpuidle_get_device();
+	dev->use_deepest_state = enable;
+	preempt_enable();
+}
+
 /**
  * cpuidle_find_deepest_state - Find the deepest available idle state.
  * @drv: cpuidle driver for the given CPU.
@@ -109,6 +119,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
 }
 
+#ifdef CONFIG_SUSPEND
 static void enter_freeze_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
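Note: the new helper is deliberately tiny; it just latches a per-CPU bit that cpuidle_idle_call() consults on the next idle entry. A minimal sketch of the intended calling pattern follows (the wrapper name is hypothetical, not part of this commit):

static void force_deepest_idle_window(void)
{
	/* Latch the per-CPU override; the preempt_disable() inside the
	 * helper pins the update to the CPU we are running on. */
	cpuidle_use_deepest_state(true);

	/* ... run the idle loop for a while, e.g. via play_idle(),
	 * added below in kernel/sched/idle.c ... */

	/* Hand state selection back to the governor. */
	cpuidle_use_deepest_state(false);
}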
drivers/cpuidle/dt_idle_states.c
@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
 	 * state enter function.
 	 */
 	idle_state->enter = match_id->data;
+	/*
+	 * Since this is not a "coupled" state, it's safe to assume interrupts
+	 * won't be enabled when it exits allowing the tick to be frozen
+	 * safely. So enter() can be also enter_freeze() callback.
+	 */
+	idle_state->enter_freeze = match_id->data;
 
 	err = of_property_read_u32(state_node, "wakeup-latency-us",
 				   &idle_state->exit_latency);
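For reference, a sketch of the driver side this helps: a DT-based idle driver supplies one enter callback per compatible string, and after this change init_state_node() wires that same callback into enter_freeze. The names below ("vendor,cpu-sleep", my_cpu_enter) are illustrative only, not from this commit:

static int my_cpu_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	/* Must return with interrupts still disabled; that is what makes
	 * it safe to reuse as the enter_freeze() callback. */
	return arm_cpuidle_suspend(index);	/* illustrative backend call */
}

static const struct of_device_id my_idle_state_match[] = {
	{ .compatible = "vendor,cpu-sleep", .data = my_cpu_enter },
	{ },
};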
drivers/cpuidle/governor.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/mutex.h>
-#include <linux/module.h>
 #include <linux/cpuidle.h>
 
 #include "cpuidle.h"
@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
 	if (cpuidle_curr_governor) {
 		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
 			cpuidle_disable_device(dev);
-		module_put(cpuidle_curr_governor->owner);
 	}
 
 	cpuidle_curr_governor = gov;
 
 	if (gov) {
-		if (!try_module_get(cpuidle_curr_governor->owner))
-			return -EINVAL;
 		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
 			cpuidle_enable_device(dev);
 		cpuidle_install_idle_handler();
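The module_put()/try_module_get() pair would only matter if a governor could be unloaded, but cpuidle governors can only be built in, so the reference counting (and the <linux/module.h> include) was dead code. The same cleanup removes the now-unused .owner initializers from the ladder and menu governors below, and the owner member itself from struct cpuidle_governor in include/linux/cpuidle.h.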
drivers/cpuidle/governors/ladder.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/pm_qos.h>
-#include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/tick.h>
 
@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
 	.enable =	ladder_enable_device,
 	.select =	ladder_select_state,
 	.reflect =	ladder_reflect,
-	.owner =	THIS_MODULE,
 };
 
 /**
drivers/cpuidle/governors/menu.c
@@ -19,7 +19,6 @@
 #include <linux/tick.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
-#include <linux/module.h>
 
 /*
  * Please note when changing the tuning values:
@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
 	.enable =	menu_enable_device,
 	.select =	menu_select,
 	.reflect =	menu_reflect,
-	.owner =	THIS_MODULE,
 };
 
 /**
include/linux/cpu.h
@@ -245,6 +245,8 @@ void arch_cpu_idle_dead(void);
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
+void play_idle(unsigned long duration_ms);
+
 #ifdef CONFIG_HOTPLUG_CPU
 bool cpu_wait_death(unsigned int cpu, int seconds);
 bool cpu_report_death(void);
include/linux/cpuidle.h
@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
+	unsigned int		use_deepest_state:1;
 	unsigned int		cpu;
 
 	int			last_residency;
@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
-#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+#ifdef CONFIG_CPU_IDLE
 extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 				      struct cpuidle_device *dev);
 extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev);
+extern void cpuidle_use_deepest_state(bool enable);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 					     struct cpuidle_device *dev)
@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 				       struct cpuidle_device *dev)
 {return -ENODEV; }
+static inline void cpuidle_use_deepest_state(bool enable)
+{
+}
 #endif
 
 /* kernel/sched/idle.c */
@@ -235,8 +240,6 @@ struct cpuidle_governor {
 	int  (*select)		(struct cpuidle_driver *drv,
 					struct cpuidle_device *dev);
 	void (*reflect)		(struct cpuidle_device *dev, int index);
-
-	struct module		*owner;
 };
 
 #ifdef CONFIG_CPU_IDLE
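Two things change here: struct cpuidle_device grows a use_deepest_state bit that the idle loop consults, and the declarations of cpuidle_find_deepest_state()/cpuidle_enter_freeze() move from "CONFIG_CPU_IDLE && CONFIG_SUSPEND" to plain CONFIG_CPU_IDLE, because the idle-injection path added in kernel/sched/idle.c needs to find the deepest state even on kernels built without suspend support.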
include/linux/sched.h
@@ -2254,6 +2254,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 /*
  * Per process flags
  */
+#define PF_IDLE		0x00000002	/* I am an IDLE thread */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2611,7 +2612,7 @@ extern struct task_struct *idle_task(int cpu);
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
-	return p->pid == 0;
+	return !!(p->flags & PF_IDLE);
 }
 
 extern struct task_struct *curr_task(int cpu);
 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
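With PF_IDLE, being "idle" becomes a task property rather than a PID test: previously is_idle_task() was true only for the per-CPU swapper threads (pid 0), whereas now any task carrying PF_IDLE counts, including an ordinary kthread while it is inside play_idle(). A small hedged illustration (the function below is hypothetical):

static void report_idle_context(void)
{
	/* True for the swapper threads and, after this commit, for any
	 * task that is currently inside play_idle(). */
	if (is_idle_task(current))
		pr_info("%s: treated as idle time by the scheduler\n",
			__func__);
}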
kernel/fork.c
@@ -1540,7 +1540,7 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cleanup_count;
 
 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
-	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
+	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
 	p->flags |= PF_FORKNOEXEC;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
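Masking PF_IDLE off in copy_process() is the flip side of making the flag a task property: a child forked from a task that happens to carry PF_IDLE must start life as a normal task rather than inheriting the idle marker.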
kernel/sched/core.c
@@ -5279,6 +5279,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
+	idle->flags |= PF_IDLE;
 
 	kasan_unpoison_task_stack(idle);
kernel/sched/idle.c
@@ -164,11 +164,14 @@ static void cpuidle_idle_call(void)
 	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
 	 */
-	if (idle_should_freeze()) {
-		entered_state = cpuidle_enter_freeze(drv, dev);
-		if (entered_state > 0) {
-			local_irq_enable();
-			goto exit_idle;
+
+	if (idle_should_freeze() || dev->use_deepest_state) {
+		if (idle_should_freeze()) {
+			entered_state = cpuidle_enter_freeze(drv, dev);
+			if (entered_state > 0) {
+				local_irq_enable();
+				goto exit_idle;
+			}
 		}
 
 		next_state = cpuidle_find_deepest_state(drv, dev);
@@ -202,76 +205,65 @@ static void cpuidle_idle_call(void)
  *
  * Called with polling cleared.
  */
-static void cpu_idle_loop(void)
+static void do_idle(void)
 {
-	int cpu = smp_processor_id();
-
-	while (1) {
-		/*
-		 * If the arch has a polling bit, we maintain an invariant:
-		 *
-		 * Our polling bit is clear if we're not scheduled (i.e. if
-		 * rq->curr != rq->idle). This means that, if rq->idle has
-		 * the polling bit set, then setting need_resched is
-		 * guaranteed to cause the cpu to reschedule.
-		 */
-
-		__current_set_polling();
-		quiet_vmstat();
-		tick_nohz_idle_enter();
+	/*
+	 * If the arch has a polling bit, we maintain an invariant:
+	 *
+	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
+	 * rq->idle). This means that, if rq->idle has the polling bit set,
+	 * then setting need_resched is guaranteed to cause the CPU to
+	 * reschedule.
+	 */
+
+	__current_set_polling();
+	tick_nohz_idle_enter();
 
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
+	while (!need_resched()) {
+		check_pgt_cache();
+		rmb();
 
-			if (cpu_is_offline(cpu)) {
-				cpuhp_report_idle_dead();
-				arch_cpu_idle_dead();
-			}
+		if (cpu_is_offline(smp_processor_id())) {
+			cpuhp_report_idle_dead();
+			arch_cpu_idle_dead();
+		}
 
-			local_irq_disable();
-			arch_cpu_idle_enter();
-
-			/*
-			 * In poll mode we reenable interrupts and spin.
-			 *
-			 * Also if we detected in the wakeup from idle
-			 * path that the tick broadcast device expired
-			 * for us, we don't want to go deep idle as we
-			 * know that the IPI is going to arrive right
-			 * away
-			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired())
-				cpu_idle_poll();
-			else
-				cpuidle_idle_call();
-
-			arch_cpu_idle_exit();
-		}
-
-		/*
-		 * Since we fell out of the loop above, we know
-		 * TIF_NEED_RESCHED must be set, propagate it into
-		 * PREEMPT_NEED_RESCHED.
-		 *
-		 * This is required because for polling idle loops we will
-		 * not have had an IPI to fold the state for us.
-		 */
-		preempt_set_need_resched();
-		tick_nohz_idle_exit();
-		__current_clr_polling();
-
-		/*
-		 * We promise to call sched_ttwu_pending and reschedule
-		 * if need_resched is set while polling is set.  That
-		 * means that clearing polling needs to be visible
-		 * before doing these things.
-		 */
-		smp_mb__after_atomic();
-
-		sched_ttwu_pending();
-		schedule_preempt_disabled();
+		local_irq_disable();
+		arch_cpu_idle_enter();
+
+		/*
+		 * In poll mode we reenable interrupts and spin. Also if we
+		 * detected in the wakeup from idle path that the tick
+		 * broadcast device expired for us, we don't want to go deep
+		 * idle as we know that the IPI is going to arrive right away.
+		 */
+		if (cpu_idle_force_poll || tick_check_broadcast_expired())
+			cpu_idle_poll();
+		else
+			cpuidle_idle_call();
+		arch_cpu_idle_exit();
 	}
+
+	/*
+	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
+	 * be set, propagate it into PREEMPT_NEED_RESCHED.
+	 *
+	 * This is required because for polling idle loops we will not have had
+	 * an IPI to fold the state for us.
+	 */
+	preempt_set_need_resched();
+	tick_nohz_idle_exit();
+	__current_clr_polling();
+
+	/*
+	 * We promise to call sched_ttwu_pending() and reschedule if
+	 * need_resched() is set while polling is set. That means that clearing
+	 * polling needs to be visible before doing these things.
+	 */
+	smp_mb__after_atomic();
+
+	sched_ttwu_pending();
+	schedule_preempt_disabled();
 }
 
 bool cpu_in_idle(unsigned long pc)
@@ -280,6 +272,56 @@ bool cpu_in_idle(unsigned long pc)
 		pc < (unsigned long)__cpuidle_text_end;
 }
 
+struct idle_timer {
+	struct hrtimer timer;
+	int done;
+};
+
+static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
+{
+	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
+
+	WRITE_ONCE(it->done, 1);
+	set_tsk_need_resched(current);
+
+	return HRTIMER_NORESTART;
+}
+
+void play_idle(unsigned long duration_ms)
+{
+	struct idle_timer it;
+
+	/*
+	 * Only FIFO tasks can disable the tick since they don't need the forced
+	 * preemption.
+	 */
+	WARN_ON_ONCE(current->policy != SCHED_FIFO);
+	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
+	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
+	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
+	WARN_ON_ONCE(!duration_ms);
+
+	rcu_sleep_check();
+	preempt_disable();
+	current->flags |= PF_IDLE;
+	cpuidle_use_deepest_state(true);
+
+	it.done = 0;
+	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	it.timer.function = idle_inject_timer_fn;
+	hrtimer_start(&it.timer, ms_to_ktime(duration_ms),
+		      HRTIMER_MODE_REL_PINNED);
+
+	while (!READ_ONCE(it.done))
+		do_idle();
+
+	cpuidle_use_deepest_state(false);
+	current->flags &= ~PF_IDLE;
+
+	preempt_fold_need_resched();
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(play_idle);
+
 void cpu_startup_entry(enum cpuhp_state state)
 {
 	/*
@@ -299,5 +341,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 #endif
 	arch_cpu_idle_prepare();
 	cpuhp_online_idle(state);
-	cpu_idle_loop();
+	while (1)
+		do_idle();
 }
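Taken together: the old cpu_idle_loop() becomes a single-pass do_idle() so that play_idle() can run exactly one pass of the real idle loop per wakeup while a pinned hrtimer bounds the injection window. A hedged sketch of a caller satisfying play_idle()'s WARN_ON_ONCE() preconditions; the kthread below is hypothetical (in the style of later idle-injection users), not code from this commit:

/* Hypothetical idle-injection worker: expected to be created with
 * kthread_create() and pinned with kthread_bind(), which also sets
 * PF_NO_SETAFFINITY and leaves nr_cpus_allowed == 1.
 */
static int idle_inject_fn(void *unused)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };

	/* play_idle() warns unless the caller is a FIFO kthread. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	while (!kthread_should_stop()) {
		play_idle(5);	/* force this CPU idle for 5 ms */
		msleep(45);	/* ~10% injected idle time */
	}
	return 0;
}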