openeuler / Kernel

Commit 5d1191ab
Authored on Jul 04, 2016 by Rafael J. Wysocki

    Merge back earlier cpufreq material for v4.8.

Parents: 742c87bf 4a7cb7a9

Showing 36 changed files with 531 additions and 758 deletions (+531 / -758)
Changed files:

Documentation/cpu-freq/core.txt                  +2    -2
Documentation/cpu-freq/cpu-drivers.txt           +4    -6
arch/powerpc/platforms/cell/cpufreq_spudemand.c  +34   -38
arch/x86/include/asm/topology.h                  +1    -11
arch/x86/kernel/cpu/intel.c                      +3    -4
arch/x86/platform/atom/punit_atom_debug.c        +3    -2
drivers/acpi/acpi_lpss.c                         +3    -2
drivers/cpufreq/Kconfig                          +4    -9
drivers/cpufreq/amd_freq_sensitivity.c           +4    -6
drivers/cpufreq/cpufreq.c                        +102  -84
drivers/cpufreq/cpufreq_conservative.c           +17   -71
drivers/cpufreq/cpufreq_governor.c               +22   -51
drivers/cpufreq/cpufreq_governor.h               +21   -3
drivers/cpufreq/cpufreq_ondemand.c               +15   -25
drivers/cpufreq/cpufreq_ondemand.h               +0    -1
drivers/cpufreq/cpufreq_performance.c            +4    -15
drivers/cpufreq/cpufreq_powersave.c              +4    -15
drivers/cpufreq/cpufreq_stats.c                  +22   -135
drivers/cpufreq/cpufreq_userspace.c              +52   -52
drivers/cpufreq/davinci-cpufreq.c                +1    -21
drivers/cpufreq/freq_table.c                     +17   -20
drivers/cpufreq/intel_pstate.c                   +55   -33
drivers/cpufreq/mvebu-cpufreq.c                  +1    -1
drivers/cpufreq/powernv-cpufreq.c                +2    -3
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c            +1    -2
drivers/cpufreq/s3c24xx-cpufreq.c                +7    -26
drivers/cpufreq/s5pv210-cpufreq.c                +2    -6
drivers/idle/intel_idle.c                        +36   -35
drivers/mmc/host/sdhci-acpi.c                    +2    -1
drivers/platform/x86/intel_telemetry_debugfs.c   +2    -1
drivers/platform/x86/intel_telemetry_pltdrv.c    +2    -1
drivers/powercap/intel_rapl.c                    +29   -21
drivers/thermal/cpu_cooling.c                    +19   -5
drivers/thermal/intel_soc_dts_thermal.c          +3    -1
include/linux/cpufreq.h                          +26   -15
kernel/sched/cpufreq_schedutil.c                 +9    -34
Documentation/cpu-freq/core.txt
@@ -96,7 +96,7 @@ new - new frequency
 For details about OPP, see Documentation/power/opp.txt
 
 dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
-	cpufreq_frequency_table_cpuinfo which is provided with the list of
+	cpufreq_table_validate_and_show() which is provided with the list of
 	frequencies that are available for operation. This function provides
 	a ready to use conversion routine to translate the OPP layer's internal
 	information about the available frequencies into a format readily
@@ -110,7 +110,7 @@ dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
 		/* Do things */
 		r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
 		if (!r)
-			cpufreq_frequency_table_cpuinfo(policy, freq_table);
+			cpufreq_table_validate_and_show(policy, freq_table);
 		/* Do other things */
 	}
Documentation/cpu-freq/cpu-drivers.txt
@@ -231,7 +231,7 @@ if you want to skip one entry in the table, set the frequency to
 CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending
 order.
 
-By calling cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+By calling cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
 					struct cpufreq_frequency_table *table);
 the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and
 policy->min and policy->max are set to the same values. This is
@@ -244,14 +244,12 @@ policy->max, and all other criteria are met. This is helpful for the
 ->verify call.
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-                                   struct cpufreq_frequency_table *table,
                                    unsigned int target_freq,
-                                   unsigned int relation,
-                                   unsigned int *index);
+                                   unsigned int relation);
 
 is the corresponding frequency table helper for the ->target
-stage. Just pass the values to this function, and the unsigned int
-index returns the number of the frequency table entry which contains
+stage. Just pass the values to this function, and this function
+returns the number of the frequency table entry which contains
 the frequency the CPU shall be set to.
 
 The following macros can be used as iterators over cpufreq_frequency_table:
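For illustration only (this snippet is not part of the commit, and example_pick_freq is a hypothetical name): with the reworked helper the selected table index is the return value instead of an output parameter, and the table itself comes from policy->freq_table.

#include <linux/cpufreq.h>

/* Hedged sketch of the new helper signature in use, assuming a policy
 * whose frequency table was validated with cpufreq_table_validate_and_show().
 */
static unsigned int example_pick_freq(struct cpufreq_policy *policy,
				      unsigned int target_freq)
{
	int index;

	/* Keep the request within the current policy limits. */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	/*
	 * Old: cpufreq_frequency_table_target(policy, table, target_freq,
	 *                                     relation, &index);
	 * New: the chosen index is returned directly.
	 */
	index = cpufreq_frequency_table_target(policy, target_freq,
					       CPUFREQ_RELATION_L);

	return policy->freq_table[index].frequency;
}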
arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -85,61 +85,57 @@ static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
 	cancel_delayed_work_sync(&info->work);
 }
 
-static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
+static int spu_gov_start(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
-	struct spu_gov_info_struct *info, *affected_info;
+	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+	struct spu_gov_info_struct *affected_info;
 	int i;
-	int ret = 0;
 
-	info = &per_cpu(spu_gov_info, cpu);
-
-	switch (event) {
-	case CPUFREQ_GOV_START:
-		if (!cpu_online(cpu)) {
-			printk(KERN_ERR "cpu %d is not online\n", cpu);
-			ret = -EINVAL;
-			break;
-		}
+	if (!cpu_online(cpu)) {
+		printk(KERN_ERR "cpu %d is not online\n", cpu);
+		return -EINVAL;
+	}
 
-		if (!policy->cur) {
-			printk(KERN_ERR "no cpu specified in policy\n");
-			ret = -EINVAL;
-			break;
-		}
+	if (!policy->cur) {
+		printk(KERN_ERR "no cpu specified in policy\n");
+		return -EINVAL;
+	}
 
-		/* initialize spu_gov_info for all affected cpus */
-		for_each_cpu(i, policy->cpus) {
-			affected_info = &per_cpu(spu_gov_info, i);
-			affected_info->policy = policy;
-		}
+	/* initialize spu_gov_info for all affected cpus */
+	for_each_cpu(i, policy->cpus) {
+		affected_info = &per_cpu(spu_gov_info, i);
+		affected_info->policy = policy;
+	}
 
-		info->poll_int = POLL_TIME;
+	info->poll_int = POLL_TIME;
 
-		/* setup timer */
-		spu_gov_init_work(info);
+	/* setup timer */
+	spu_gov_init_work(info);
 
-		break;
+	return 0;
+}
 
-	case CPUFREQ_GOV_STOP:
-		/* cancel timer */
-		spu_gov_cancel_work(info);
+static void spu_gov_stop(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+	int i;
 
-		/* clean spu_gov_info for all affected cpus */
-		for_each_cpu (i, policy->cpus) {
-			info = &per_cpu(spu_gov_info, i);
-			info->policy = NULL;
-		}
+	/* cancel timer */
+	spu_gov_cancel_work(info);
 
-		break;
+	/* clean spu_gov_info for all affected cpus */
+	for_each_cpu(i, policy->cpus) {
+		info = &per_cpu(spu_gov_info, i);
+		info->policy = NULL;
 	}
-
-	return ret;
 }
 
 static struct cpufreq_governor spu_governor = {
 	.name = "spudemand",
-	.governor = spu_gov_govern,
+	.start = spu_gov_start,
+	.stop = spu_gov_stop,
 	.owner = THIS_MODULE,
 };
arch/x86/include/asm/topology.h
@@ -25,16 +25,6 @@
 #ifndef _ASM_X86_TOPOLOGY_H
 #define _ASM_X86_TOPOLOGY_H
 
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-#else
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-#endif
-
 /*
  * to preserve the visibility of NUMA_NO_NODE definition,
  * moved to there from here.  May be used independent of
@@ -123,7 +113,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 
-#ifdef ENABLE_TOPO_DEFINES
+#ifdef CONFIG_SMP
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
arch/x86/kernel/cpu/intel.c
@@ -300,15 +300,14 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	}
 
 	/*
-	 * P4 Xeon errata 037 workaround.
+	 * P4 Xeon erratum 037 workaround.
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
 		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
-				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
-		    > 0) {
+				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
 			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
-			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
+			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
 		}
 	}
arch/x86/platform/atom/punit_atom_debug.c
@@ -23,6 +23,7 @@
 #include <linux/seq_file.h>
 #include <linux/io.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 
 /* Power gate status reg */
@@ -143,8 +144,8 @@ static void punit_dbgfs_unregister(void)
 	  (kernel_ulong_t)&drv_data }
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-	ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
-	ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
+	ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
 	{}
 };
drivers/acpi/acpi_lpss.c
@@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 #ifdef CONFIG_X86_INTEL_LPSS
 
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 #include <asm/pmc_atom.h>
 
@@ -229,8 +230,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
 #define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
 
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(0x37),	/* Valleyview, Bay Trail */
-	ICPU(0x4c),	/* Braswell, Cherry Trail */
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
+	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
 	{}
 };
drivers/cpufreq/Kconfig
@@ -31,23 +31,18 @@ config CPU_FREQ_BOOST_SW
 	depends on THERMAL
 
 config CPU_FREQ_STAT
-	tristate "CPU frequency translation statistics"
+	bool "CPU frequency transition statistics"
 	default y
 	help
-	  This driver exports CPU frequency statistics information through sysfs
-	  file system.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called cpufreq_stats.
+	  Export CPU frequency statistics information through sysfs.
 
 	  If in doubt, say N.
 
 config CPU_FREQ_STAT_DETAILS
-	bool "CPU frequency translation statistics details"
+	bool "CPU frequency transition statistics details"
 	depends on CPU_FREQ_STAT
 	help
-	  This will show detail CPU frequency translation table in sysfs file
-	  system.
+	  Show detailed CPU frequency transition table in sysfs.
 
 	  If in doubt, say N.
drivers/cpufreq/amd_freq_sensitivity.c
@@ -48,9 +48,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *od_data = policy_dbs->dbs_data;
 	struct od_dbs_tuners *od_tuners = od_data->tuners;
-	struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
 
-	if (!od_info->freq_table)
+	if (!policy->freq_table)
 		return freq_next;
 
 	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
@@ -92,10 +91,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	else {
 		unsigned int index;
 
-		cpufreq_frequency_table_target(policy,
-			od_info->freq_table, policy->cur - 1,
-			CPUFREQ_RELATION_H, &index);
-		freq_next = od_info->freq_table[index].frequency;
+		index = cpufreq_frequency_table_target(policy,
+						       policy->cur - 1,
+						       CPUFREQ_RELATION_H);
+		freq_next = policy->freq_table[index].frequency;
 	}
 
 	data->freq_prev = freq_next;
drivers/cpufreq/cpufreq.c
@@ -74,19 +74,12 @@ static inline bool has_target(void)
 }
 
 /* internal prototypes */
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static int cpufreq_init_governor(struct cpufreq_policy *policy);
+static void cpufreq_exit_governor(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
-
-static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
-{
-	(void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-}
-
-static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
-{
-	(void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-}
+static void cpufreq_stop_governor(struct cpufreq_policy *policy);
+static void cpufreq_governor_limits(struct cpufreq_policy *policy);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -133,15 +126,6 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
-{
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-
-	return policy && !policy_is_inactive(policy) ?
-		policy->freq_table : NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-
 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
 	u64 idle_time;
@@ -354,6 +338,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		pr_debug("FREQ: %lu - CPU: %lu\n",
 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
 		trace_cpu_frequency(freqs->new, freqs->cpu);
+		cpufreq_stats_record_transition(policy, freqs->new);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1115,6 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
 					     CPUFREQ_REMOVE_POLICY, policy);
 
 	down_write(&policy->rwsem);
+	cpufreq_stats_free_table(policy);
 	cpufreq_remove_dev_symlink(policy);
 	kobj = &policy->kobj;
 	cmp = &policy->kobj_unregister;
@@ -1265,13 +1251,12 @@ static int cpufreq_online(unsigned int cpu)
 		}
 	}
 
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-				     CPUFREQ_START, policy);
-
 	if (new_policy) {
 		ret = cpufreq_add_dev_interface(policy);
 		if (ret)
 			goto out_exit_policy;
+
+		cpufreq_stats_create_table(policy);
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
@@ -1280,6 +1265,9 @@ static int cpufreq_online(unsigned int cpu)
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	}
 
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				     CPUFREQ_START, policy);
+
 	ret = cpufreq_init_policy(policy);
 	if (ret) {
 		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1864,14 +1852,17 @@ static int __target_intermediate(struct cpufreq_policy *policy,
 	return ret;
 }
 
-static int __target_index(struct cpufreq_policy *policy,
-			  struct cpufreq_frequency_table *freq_table, int index)
+static int __target_index(struct cpufreq_policy *policy, int index)
 {
 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
 	unsigned int intermediate_freq = 0;
+	unsigned int newfreq = policy->freq_table[index].frequency;
 	int retval = -EINVAL;
 	bool notify;
 
+	if (newfreq == policy->cur)
+		return 0;
+
 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
 	if (notify) {
 		/* Handle switching to intermediate frequency */
@@ -1886,7 +1877,7 @@ static int __target_index(struct cpufreq_policy *policy,
 			freqs.old = freqs.new;
 	}
 
-	freqs.new = freq_table[index].frequency;
+	freqs.new = newfreq;
 	pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
 		 __func__, policy->cpu, freqs.old, freqs.new);
@@ -1923,17 +1914,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	unsigned int old_target_freq = target_freq;
-	struct cpufreq_frequency_table *freq_table;
-	int index, retval;
+	int index;
 
 	if (cpufreq_disabled())
 		return -ENODEV;
 
 	/* Make sure that target_freq is within supported range */
-	if (target_freq > policy->max)
-		target_freq = policy->max;
-	if (target_freq < policy->min)
-		target_freq = policy->min;
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
@@ -1956,23 +1943,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!cpufreq_driver->target_index)
 		return -EINVAL;
 
-	freq_table = cpufreq_frequency_get_table(policy->cpu);
-	if (unlikely(!freq_table)) {
-		pr_err("%s: Unable to find freq_table\n", __func__);
-		return -EINVAL;
-	}
-
-	retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
-						relation, &index);
-	if (unlikely(retval)) {
-		pr_err("%s: Unable to find matching freq\n", __func__);
-		return retval;
-	}
-
-	if (freq_table[index].frequency == policy->cur)
-		return 0;
+	index = cpufreq_frequency_table_target(policy, target_freq, relation);
 
-	return __target_index(policy, freq_table, index);
+	return __target_index(policy, index);
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1997,7 +1970,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
 	return NULL;
 }
 
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+static int cpufreq_init_governor(struct cpufreq_policy *policy)
 {
 	int ret;
@@ -2025,36 +1998,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 		}
 	}
 
-	if (event == CPUFREQ_GOV_POLICY_INIT)
-		if (!try_module_get(policy->governor->owner))
-			return -EINVAL;
-
-	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
+	if (!try_module_get(policy->governor->owner))
+		return -EINVAL;
 
-	ret = policy->governor->governor(policy, event);
+	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
 
-	if (event == CPUFREQ_GOV_POLICY_INIT) {
-		if (ret)
+	if (policy->governor->init) {
+		ret = policy->governor->init(policy);
+		if (ret) {
 			module_put(policy->governor->owner);
-		else
-			policy->governor->initialized++;
-	} else if (event == CPUFREQ_GOV_POLICY_EXIT) {
-		policy->governor->initialized--;
-		module_put(policy->governor->owner);
+			return ret;
+		}
 	}
 
-	return ret;
+	return 0;
+}
+
+static void cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+	if (cpufreq_suspended || !policy->governor)
+		return;
+
+	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+	if (policy->governor->exit)
+		policy->governor->exit(policy);
+
+	module_put(policy->governor->owner);
 }
 
 static int cpufreq_start_governor(struct cpufreq_policy *policy)
 {
 	int ret;
 
+	if (cpufreq_suspended)
+		return 0;
+
+	if (!policy->governor)
+		return -EINVAL;
+
+	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
 		cpufreq_update_current_freq(policy);
 
-	ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
-	return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	if (policy->governor->start) {
+		ret = policy->governor->start(policy);
+		if (ret)
+			return ret;
+	}
+
+	if (policy->governor->limits)
+		policy->governor->limits(policy);
+
+	return 0;
+}
+
+static void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+	if (cpufreq_suspended || !policy->governor)
+		return;
+
+	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+	if (policy->governor->stop)
+		policy->governor->stop(policy);
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy)
+{
+	if (cpufreq_suspended || !policy->governor)
+		return;
+
+	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+	if (policy->governor->limits)
+		policy->governor->limits(policy);
 }
 
 int cpufreq_register_governor(struct cpufreq_governor *governor)
@@ -2069,7 +2088,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 	mutex_lock(&cpufreq_governor_mutex);
 
-	governor->initialized = 0;
 	err = -EBUSY;
 	if (!find_governor(governor->name)) {
 		err = 0;
@@ -2195,7 +2213,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (new_policy->governor == policy->governor) {
 		pr_debug("cpufreq: governor limits update\n");
-		return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+		cpufreq_governor_limits(policy);
+		return 0;
 	}
 
 	pr_debug("governor switch\n");
@@ -2210,7 +2229,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	/* start new governor */
 	policy->governor = new_policy->governor;
-	ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+	ret = cpufreq_init_governor(policy);
 	if (!ret) {
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
@@ -2224,7 +2243,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	pr_debug("starting governor %s failed\n", policy->governor->name);
 	if (old_gov) {
 		policy->governor = old_gov;
-		if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+		if (cpufreq_init_governor(policy))
 			policy->governor = NULL;
 		else
 			cpufreq_start_governor(policy);
@@ -2309,26 +2328,25 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
 static int cpufreq_boost_set_sw(int state)
 {
-	struct cpufreq_frequency_table *freq_table;
 	struct cpufreq_policy *policy;
 	int ret = -EINVAL;
 
 	for_each_active_policy(policy) {
-		freq_table = cpufreq_frequency_get_table(policy->cpu);
-		if (freq_table) {
-			ret = cpufreq_frequency_table_cpuinfo(policy,
-							freq_table);
-			if (ret) {
-				pr_err("%s: Policy frequency update failed\n",
-				       __func__);
-				break;
-			}
-
-			down_write(&policy->rwsem);
-			policy->user_policy.max = policy->max;
-			cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-			up_write(&policy->rwsem);
-		}
+		if (!policy->freq_table)
+			continue;
+
+		ret = cpufreq_frequency_table_cpuinfo(policy,
+						      policy->freq_table);
+		if (ret) {
+			pr_err("%s: Policy frequency update failed\n",
+			       __func__);
+			break;
+		}
+
+		down_write(&policy->rwsem);
+		policy->user_policy.max = policy->max;
+		cpufreq_governor_limits(policy);
+		up_write(&policy->rwsem);
 	}
 
 	return ret;
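To make the new core/governor contract concrete: a governor no longer receives CPUFREQ_GOV_* events through a single ->governor() callback; the core calls ->init/->exit around the governor's lifetime and ->start/->stop/->limits as needed. Below is an illustrative sketch (not from this commit; the example_* names are hypothetical) of the smallest governor possible under the new scheme, modeled on what the performance governor becomes.

#include <linux/cpufreq.h>
#include <linux/module.h>

/* Minimal governor sketch: pin the frequency to the policy maximum whenever
 * the core (re)applies limits.  ->limits is also invoked right after ->start,
 * so no separate start handler is needed here.
 */
static void example_gov_limits(struct cpufreq_policy *policy)
{
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor example_gov = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.limits	= example_gov_limits,
};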
drivers/cpufreq/cpufreq_conservative.c
@@ -17,7 +17,6 @@
 struct cs_policy_dbs_info {
 	struct policy_dbs_info policy_dbs;
 	unsigned int down_skip;
-	unsigned int requested_freq;
 };
 
 static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -75,19 +74,17 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 
 	/* Check for frequency increase */
 	if (load > dbs_data->up_threshold) {
+		unsigned int requested_freq = policy->cur;
+
 		dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
-		if (dbs_info->requested_freq == policy->max)
+		if (requested_freq == policy->max)
 			goto out;
 
-		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
-
-		if (dbs_info->requested_freq > policy->max)
-			dbs_info->requested_freq = policy->max;
+		requested_freq += get_freq_target(cs_tuners, policy);
 
-		__cpufreq_driver_target(policy, dbs_info->requested_freq,
-			CPUFREQ_RELATION_H);
+		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
 		goto out;
 	}
@@ -98,36 +95,27 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 
 	/* Check for frequency decrease */
 	if (load < cs_tuners->down_threshold) {
-		unsigned int freq_target;
+		unsigned int freq_target, requested_freq = policy->cur;
+
 		/*
 		 * if we cannot reduce the frequency anymore, break out early
 		 */
-		if (policy->cur == policy->min)
+		if (requested_freq == policy->min)
 			goto out;
 
 		freq_target = get_freq_target(cs_tuners, policy);
-		if (dbs_info->requested_freq > freq_target)
-			dbs_info->requested_freq -= freq_target;
+		if (requested_freq > freq_target)
+			requested_freq -= freq_target;
 		else
-			dbs_info->requested_freq = policy->min;
+			requested_freq = policy->min;
 
-		__cpufreq_driver_target(policy, dbs_info->requested_freq,
-				CPUFREQ_RELATION_L);
+		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
 	}
 
  out:
 	return dbs_data->sampling_rate;
 }
 
-static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				void *data);
-
-static struct notifier_block cs_cpufreq_notifier_block = {
-	.notifier_call = dbs_cpufreq_notifier,
-};
-
 /************************** sysfs interface ************************/
-static struct dbs_governor cs_dbs_gov;
 
 static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
 					  const char *buf, size_t count)
@@ -268,15 +256,13 @@ static void cs_free(struct policy_dbs_info *policy_dbs)
 	kfree(to_dbs_info(policy_dbs));
 }
 
-static int cs_init(struct dbs_data *dbs_data, bool notify)
+static int cs_init(struct dbs_data *dbs_data)
 {
 	struct cs_dbs_tuners *tuners;
 
 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
-	if (!tuners) {
-		pr_err("%s: kzalloc failed\n", __func__);
+	if (!tuners)
 		return -ENOMEM;
-	}
 
 	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
 	tuners->freq_step = DEF_FREQUENCY_STEP;
@@ -288,19 +274,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
 	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 		jiffies_to_usecs(10);
 
-	if (notify)
-		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
-					  CPUFREQ_TRANSITION_NOTIFIER);
-
 	return 0;
 }
 
-static void cs_exit(struct dbs_data *dbs_data, bool notify)
+static void cs_exit(struct dbs_data *dbs_data)
 {
-	if (notify)
-		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
-					    CPUFREQ_TRANSITION_NOTIFIER);
-
 	kfree(dbs_data->tuners);
 }
@@ -309,16 +287,10 @@ static void cs_start(struct cpufreq_policy *policy)
 	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
 
 	dbs_info->down_skip = 0;
-	dbs_info->requested_freq = policy->cur;
 }
 
-static struct dbs_governor cs_dbs_gov = {
-	.gov = {
-		.name = "conservative",
-		.governor = cpufreq_governor_dbs,
-		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
-		.owner = THIS_MODULE,
-	},
+static struct dbs_governor cs_governor = {
+	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
 	.kobj_type = { .default_attrs = cs_attributes },
 	.gov_dbs_timer = cs_dbs_timer,
 	.alloc = cs_alloc,
@@ -328,33 +300,7 @@ static struct dbs_governor cs_dbs_gov = {
 	.start = cs_start,
 };
 
-#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_dbs_gov.gov)
-
-static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				void *data)
-{
-	struct cpufreq_freqs *freq = data;
-	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
-	struct cs_policy_dbs_info *dbs_info;
-
-	if (!policy)
-		return 0;
-
-	/* policy isn't governed by conservative governor */
-	if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
-		return 0;
-
-	dbs_info = to_dbs_info(policy->governor_data);
-	/*
-	 * we only care if our internally tracked freq moves outside the 'valid'
-	 * ranges of frequency available to us otherwise we do not change it
-	 */
-	if (dbs_info->requested_freq > policy->max
-			|| dbs_info->requested_freq < policy->min)
-		dbs_info->requested_freq = freq->new;
-
-	return 0;
-}
+#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_governor.gov)
 
 static int __init cpufreq_gov_dbs_init(void)
 {
drivers/cpufreq/cpufreq_governor.c
@@ -336,17 +336,6 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
 	synchronize_sched();
 }
 
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
-	struct policy_dbs_info *policy_dbs = policy->governor_data;
-
-	gov_clear_update_util(policy_dbs->policy);
-	irq_work_sync(&policy_dbs->irq_work);
-	cancel_work_sync(&policy_dbs->work);
-	atomic_set(&policy_dbs->work_count, 0);
-	policy_dbs->work_in_progress = false;
-}
-
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
 						     struct dbs_governor *gov)
 {
@@ -389,7 +378,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
 	gov->free(policy_dbs);
 }
 
-static int cpufreq_governor_init(struct cpufreq_policy *policy)
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
 {
 	struct dbs_governor *gov = dbs_governor_of(policy);
 	struct dbs_data *dbs_data;
@@ -429,7 +418,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
 	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
 
-	ret = gov->init(dbs_data, !policy->governor->initialized);
+	ret = gov->init(dbs_data);
 	if (ret)
 		goto free_policy_dbs_info;
@@ -458,13 +447,13 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
 		goto out;
 
 	/* Failure, so roll back. */
-	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
+	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
 
 	policy->governor_data = NULL;
 
 	if (!have_governor_per_policy())
 		gov->gdbs_data = NULL;
-	gov->exit(dbs_data, !policy->governor->initialized);
+	gov->exit(dbs_data);
 	kfree(dbs_data);
 
 free_policy_dbs_info:
@@ -474,8 +463,9 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
 	mutex_unlock(&gov_dbs_data_mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
 
-static int cpufreq_governor_exit(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
 {
 	struct dbs_governor *gov = dbs_governor_of(policy);
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -493,17 +483,17 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
 		if (!have_governor_per_policy())
 			gov->gdbs_data = NULL;
 
-		gov->exit(dbs_data, policy->governor->initialized == 1);
+		gov->exit(dbs_data);
 		kfree(dbs_data);
 	}
 
 	free_policy_dbs_info(policy_dbs, gov);
 
 	mutex_unlock(&gov_dbs_data_mutex);
-	return 0;
 }
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
 
-static int cpufreq_governor_start(struct cpufreq_policy *policy)
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
 {
 	struct dbs_governor *gov = dbs_governor_of(policy);
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -539,47 +529,28 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
 	gov_set_update_util(policy_dbs, sampling_rate);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
 
-static int cpufreq_governor_stop(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
 {
-	gov_cancel_work(policy);
-	return 0;
+	struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+	gov_clear_update_util(policy_dbs->policy);
+	irq_work_sync(&policy_dbs->irq_work);
+	cancel_work_sync(&policy_dbs->work);
+	atomic_set(&policy_dbs->work_count, 0);
+	policy_dbs->work_in_progress = false;
 }
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
 
-static int cpufreq_governor_limits(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 
 	mutex_lock(&policy_dbs->timer_mutex);
-
-	if (policy->max < policy->cur)
-		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
-	else if (policy->min > policy->cur)
-		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
-
+	cpufreq_policy_apply_limits(policy);
 	gov_update_sample_delay(policy_dbs, 0);
-
 	mutex_unlock(&policy_dbs->timer_mutex);
-
-	return 0;
-}
-
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
-{
-	if (event == CPUFREQ_GOV_POLICY_INIT) {
-		return cpufreq_governor_init(policy);
-	} else if (policy->governor_data) {
-		switch (event) {
-		case CPUFREQ_GOV_POLICY_EXIT:
-			return cpufreq_governor_exit(policy);
-		case CPUFREQ_GOV_START:
-			return cpufreq_governor_start(policy);
-		case CPUFREQ_GOV_STOP:
-			return cpufreq_governor_stop(policy);
-		case CPUFREQ_GOV_LIMITS:
-			return cpufreq_governor_limits(policy);
-		}
-	}
-	return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
drivers/cpufreq/cpufreq_governor.h
@@ -138,8 +138,8 @@ struct dbs_governor {
 	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
 	struct policy_dbs_info *(*alloc)(void);
 	void (*free)(struct policy_dbs_info *policy_dbs);
-	int (*init)(struct dbs_data *dbs_data, bool notify);
-	void (*exit)(struct dbs_data *dbs_data, bool notify);
+	int (*init)(struct dbs_data *dbs_data);
+	void (*exit)(struct dbs_data *dbs_data);
 	void (*start)(struct cpufreq_policy *policy);
 };
@@ -148,6 +148,25 @@ static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
 	return container_of(policy->governor, struct dbs_governor, gov);
 }
 
+/* Governor callback routines */
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+
+#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)			\
+	{								\
+		.name = _name_,						\
+		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,	\
+		.owner = THIS_MODULE,					\
+		.init = cpufreq_dbs_governor_init,			\
+		.exit = cpufreq_dbs_governor_exit,			\
+		.start = cpufreq_dbs_governor_start,			\
+		.stop = cpufreq_dbs_governor_stop,			\
+		.limits = cpufreq_dbs_governor_limits,			\
+	}
+
 /* Governor specific operations */
 struct od_ops {
 	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
@@ -155,7 +174,6 @@ struct od_ops {
 };
 
 unsigned int dbs_update(struct cpufreq_policy *policy);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
 void od_register_powersave_bias_handler(unsigned int (*f)
 		(struct cpufreq_policy *, unsigned int, unsigned int),
 		unsigned int powersave_bias);
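For illustration (not part of the commit; struct example_tuners and the example_dbs_* names are hypothetical): a dbs-based governor now lets CPUFREQ_DBS_GOVERNOR_INITIALIZER() fill in the common cpufreq_governor callbacks and only supplies its own tunable setup/teardown, whose init/exit hooks have lost the old "bool notify" parameter.

#include <linux/slab.h>

/* Hypothetical per-governor tunables. */
struct example_tuners {
	unsigned int step;
};

/* New-style hooks: they receive only the dbs_data they operate on. */
static int example_dbs_init(struct dbs_data *dbs_data)
{
	struct example_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	tuners->step = 5;
	dbs_data->tuners = tuners;
	return 0;
}

static void example_dbs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

Such a governor would then declare its struct dbs_governor with .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("example") and point .init/.exit at hooks like the ones above, which is exactly what the ondemand and conservative changes in this commit do.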
drivers/cpufreq/cpufreq_ondemand.c
@@ -65,34 +65,32 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 {
 	unsigned int freq_req, freq_reduc, freq_avg;
 	unsigned int freq_hi, freq_lo;
-	unsigned int index = 0;
+	unsigned int index;
 	unsigned int delay_hi_us;
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+	struct cpufreq_frequency_table *freq_table = policy->freq_table;
 
-	if (!dbs_info->freq_table) {
+	if (!freq_table) {
 		dbs_info->freq_lo = 0;
 		dbs_info->freq_lo_delay_us = 0;
 		return freq_next;
 	}
 
-	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
-			relation, &index);
-	freq_req = dbs_info->freq_table[index].frequency;
+	index = cpufreq_frequency_table_target(policy, freq_next, relation);
+	freq_req = freq_table[index].frequency;
 	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
 	freq_avg = freq_req - freq_reduc;
 
 	/* Find freq bounds for freq_avg in freq_table */
-	index = 0;
-	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
-			CPUFREQ_RELATION_H, &index);
-	freq_lo = dbs_info->freq_table[index].frequency;
-	index = 0;
-	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
-			CPUFREQ_RELATION_L, &index);
-	freq_hi = dbs_info->freq_table[index].frequency;
+	index = cpufreq_frequency_table_target(policy, freq_avg,
+					       CPUFREQ_RELATION_H);
+	freq_lo = freq_table[index].frequency;
+	index = cpufreq_frequency_table_target(policy, freq_avg,
+					       CPUFREQ_RELATION_L);
+	freq_hi = freq_table[index].frequency;
 
 	/* Find out how long we have to be in hi and lo freqs */
 	if (freq_hi == freq_lo) {
@@ -113,7 +111,6 @@ static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
 {
 	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
 
-	dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
 	dbs_info->freq_lo = 0;
 }
@@ -361,17 +358,15 @@ static void od_free(struct policy_dbs_info *policy_dbs)
 	kfree(to_dbs_info(policy_dbs));
 }
 
-static int od_init(struct dbs_data *dbs_data, bool notify)
+static int od_init(struct dbs_data *dbs_data)
 {
 	struct od_dbs_tuners *tuners;
 	u64 idle_time;
 	int cpu;
 
 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
-	if (!tuners) {
-		pr_err("%s: kzalloc failed\n", __func__);
+	if (!tuners)
 		return -ENOMEM;
-	}
 
 	cpu = get_cpu();
 	idle_time = get_cpu_idle_time_us(cpu, NULL);
@@ -402,7 +397,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
 	return 0;
 }
 
-static void od_exit(struct dbs_data *dbs_data, bool notify)
+static void od_exit(struct dbs_data *dbs_data)
 {
 	kfree(dbs_data->tuners);
 }
@@ -420,12 +415,7 @@ static struct od_ops od_ops = {
 };
 
 static struct dbs_governor od_dbs_gov = {
-	.gov = {
-		.name = "ondemand",
-		.governor = cpufreq_governor_dbs,
-		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
-		.owner = THIS_MODULE,
-	},
+	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
 	.kobj_type = { .default_attrs = od_attributes },
 	.gov_dbs_timer = od_dbs_timer,
 	.alloc = od_alloc,
drivers/cpufreq/cpufreq_ondemand.h
@@ -13,7 +13,6 @@
 struct od_policy_dbs_info {
 	struct policy_dbs_info policy_dbs;
-	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_delay_us;
 	unsigned int freq_hi_delay_us;
drivers/cpufreq/cpufreq_performance.c
@@ -16,27 +16,16 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
-static int cpufreq_governor_performance(struct cpufreq_policy *policy,
-					unsigned int event)
+static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
 {
-	switch (event) {
-	case CPUFREQ_GOV_START:
-	case CPUFREQ_GOV_LIMITS:
-		pr_debug("setting to %u kHz because of event %u\n",
-						policy->max, event);
-		__cpufreq_driver_target(policy, policy->max,
-						CPUFREQ_RELATION_H);
-		break;
-	default:
-		break;
-	}
-	return 0;
+	pr_debug("setting to %u kHz\n", policy->max);
+	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 }
 
 static struct cpufreq_governor cpufreq_gov_performance = {
 	.name		= "performance",
-	.governor	= cpufreq_governor_performance,
 	.owner		= THIS_MODULE,
+	.limits		= cpufreq_gov_performance_limits,
 };
 
 static int __init cpufreq_gov_performance_init(void)
drivers/cpufreq/cpufreq_powersave.c
@@ -16,26 +16,15 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
-static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
-					unsigned int event)
+static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
 {
-	switch (event) {
-	case CPUFREQ_GOV_START:
-	case CPUFREQ_GOV_LIMITS:
-		pr_debug("setting to %u kHz because of event %u\n",
-							policy->min, event);
-		__cpufreq_driver_target(policy, policy->min,
-						CPUFREQ_RELATION_L);
-		break;
-	default:
-		break;
-	}
-	return 0;
+	pr_debug("setting to %u kHz\n", policy->min);
+	__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
 }
 
 static struct cpufreq_governor cpufreq_gov_powersave = {
 	.name		= "powersave",
-	.governor	= cpufreq_governor_powersave,
+	.limits		= cpufreq_gov_powersave_limits,
 	.owner		= THIS_MODULE,
 };
drivers/cpufreq/cpufreq_stats.c
@@ -15,7 +15,7 @@
 #include <linux/slab.h>
 #include <linux/cputime.h>
 
-static spinlock_t cpufreq_stats_lock;
+static DEFINE_SPINLOCK(cpufreq_stats_lock);
 
 struct cpufreq_stats {
 	unsigned int total_trans;
@@ -52,6 +52,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	ssize_t len = 0;
 	int i;
 
+	if (policy->fast_switch_enabled)
+		return 0;
+
 	cpufreq_stats_update(stats);
 	for (i = 0; i < stats->state_num; i++) {
 		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -68,6 +71,9 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 	ssize_t len = 0;
 	int i, j;
 
+	if (policy->fast_switch_enabled)
+		return 0;
+
 	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
 	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
 	for (i = 0; i < stats->state_num; i++) {
@@ -130,7 +136,7 @@ static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
 	return -1;
 }
 
-static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
+void cpufreq_stats_free_table(struct cpufreq_policy *policy)
 {
 	struct cpufreq_stats *stats = policy->stats;
@@ -146,39 +152,25 @@ static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
 	policy->stats = NULL;
 }
 
-static void cpufreq_stats_free_table(unsigned int cpu)
-{
-	struct cpufreq_policy *policy;
-
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
-		return;
-
-	__cpufreq_stats_free_table(policy);
-
-	cpufreq_cpu_put(policy);
-}
-
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+void cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
 	unsigned int i = 0, count = 0, ret = -ENOMEM;
 	struct cpufreq_stats *stats;
 	unsigned int alloc_size;
-	unsigned int cpu = policy->cpu;
 	struct cpufreq_frequency_table *pos, *table;
 
 	/* We need cpufreq table for creating stats table */
-	table = cpufreq_frequency_get_table(cpu);
+	table = policy->freq_table;
 	if (unlikely(!table))
-		return 0;
+		return;
 
 	/* stats already initialized */
 	if (policy->stats)
-		return -EEXIST;
+		return;
 
 	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
 	if (!stats)
-		return -ENOMEM;
+		return;
 
 	/* Find total allocation size */
 	cpufreq_for_each_valid_entry(pos, table)
@@ -215,80 +207,32 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 	policy->stats = stats;
 	ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
 	if (!ret)
-		return 0;
+		return;
 
 	/* We failed, release resources */
 	policy->stats = NULL;
 	kfree(stats->time_in_state);
 free_stat:
 	kfree(stats);
-
-	return ret;
-}
-
-static void cpufreq_stats_create_table(unsigned int cpu)
-{
-	struct cpufreq_policy *policy;
-
-	/*
-	 * "likely(!policy)" because normally cpufreq_stats will be registered
-	 * before cpufreq driver
-	 */
-	policy = cpufreq_cpu_get(cpu);
-	if (likely(!policy))
-		return;
-
-	__cpufreq_stats_create_table(policy);
-
-	cpufreq_cpu_put(policy);
-}
-
-static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
-		unsigned long val, void *data)
-{
-	int ret = 0;
-	struct cpufreq_policy *policy = data;
-
-	if (val == CPUFREQ_CREATE_POLICY)
-		ret = __cpufreq_stats_create_table(policy);
-	else if (val == CPUFREQ_REMOVE_POLICY)
-		__cpufreq_stats_free_table(policy);
-
-	return ret;
 }
 
-static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
-		unsigned long val, void *data)
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+				     unsigned int new_freq)
 {
-	struct cpufreq_freqs *freq = data;
-	struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
-	struct cpufreq_stats *stats;
+	struct cpufreq_stats *stats = policy->stats;
 	int old_index, new_index;
 
-	if (!policy) {
-		pr_err("%s: No policy found\n", __func__);
-		return 0;
-	}
-
-	if (val != CPUFREQ_POSTCHANGE)
-		goto put_policy;
-
-	if (!policy->stats) {
+	if (!stats) {
 		pr_debug("%s: No stats found\n", __func__);
-		goto put_policy;
+		return;
 	}
 
-	stats = policy->stats;
-
 	old_index = stats->last_index;
-	new_index = freq_table_get_index(stats, freq->new);
+	new_index = freq_table_get_index(stats, new_freq);
 
 	/* We can't do stats->time_in_state[-1]= .. */
-	if (old_index == -1 || new_index == -1)
-		goto put_policy;
-
-	if (old_index == new_index)
-		goto put_policy;
+	if (old_index == -1 || new_index == -1 || old_index == new_index)
+		return;
 
 	cpufreq_stats_update(stats);
@@ -297,61 +241,4 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
 	stats->trans_table[old_index * stats->max_state + new_index]++;
 #endif
 	stats->total_trans++;
-
-put_policy:
-	cpufreq_cpu_put(policy);
-	return 0;
 }
-
-static struct notifier_block notifier_policy_block = {
-	.notifier_call = cpufreq_stat_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
-	.notifier_call = cpufreq_stat_notifier_trans
-};
-
-static int __init cpufreq_stats_init(void)
-{
-	int ret;
-	unsigned int cpu;
-
-	spin_lock_init(&cpufreq_stats_lock);
-	ret = cpufreq_register_notifier(&notifier_policy_block,
-				CPUFREQ_POLICY_NOTIFIER);
-	if (ret)
-		return ret;
-
-	for_each_online_cpu(cpu)
-		cpufreq_stats_create_table(cpu);
-
-	ret = cpufreq_register_notifier(&notifier_trans_block,
-				CPUFREQ_TRANSITION_NOTIFIER);
-	if (ret) {
-		cpufreq_unregister_notifier(&notifier_policy_block,
-				CPUFREQ_POLICY_NOTIFIER);
-		for_each_online_cpu(cpu)
-			cpufreq_stats_free_table(cpu);
-		return ret;
-	}
-
-	return 0;
-}
-static void __exit cpufreq_stats_exit(void)
-{
-	unsigned int cpu;
-
-	cpufreq_unregister_notifier(&notifier_policy_block,
-			CPUFREQ_POLICY_NOTIFIER);
-	cpufreq_unregister_notifier(&notifier_trans_block,
-			CPUFREQ_TRANSITION_NOTIFIER);
-	for_each_online_cpu(cpu)
-		cpufreq_stats_free_table(cpu);
-}
-
-MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
-MODULE_LICENSE("GPL");
-module_init(cpufreq_stats_init);
-module_exit(cpufreq_stats_exit);
drivers/cpufreq/cpufreq_userspace.c
@@ -65,66 +65,66 @@ static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
-				      unsigned int event)
+static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
+{
+	mutex_lock(&userspace_mutex);
+	kfree(policy->governor_data);
+	policy->governor_data = NULL;
+	mutex_unlock(&userspace_mutex);
+}
+
+static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
 {
 	unsigned int *setspeed = policy->governor_data;
-	unsigned int cpu = policy->cpu;
-	int rc = 0;
 
-	if (event == CPUFREQ_GOV_POLICY_INIT)
-		return cpufreq_userspace_policy_init(policy);
+	BUG_ON(!policy->cur);
+	pr_debug("started managing cpu %u\n", policy->cpu);
 
-	if (!setspeed)
-		return -EINVAL;
-
-	switch (event) {
-	case CPUFREQ_GOV_POLICY_EXIT:
-		mutex_lock(&userspace_mutex);
-		policy->governor_data = NULL;
-		kfree(setspeed);
-		mutex_unlock(&userspace_mutex);
-		break;
-	case CPUFREQ_GOV_START:
-		BUG_ON(!policy->cur);
-		pr_debug("started managing cpu %u\n", cpu);
-
-		mutex_lock(&userspace_mutex);
-		per_cpu(cpu_is_managed, cpu) = 1;
-		*setspeed = policy->cur;
-		mutex_unlock(&userspace_mutex);
-		break;
-	case CPUFREQ_GOV_STOP:
-		pr_debug("managing cpu %u stopped\n", cpu);
-
-		mutex_lock(&userspace_mutex);
-		per_cpu(cpu_is_managed, cpu) = 0;
-		*setspeed = 0;
-		mutex_unlock(&userspace_mutex);
-		break;
-	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&userspace_mutex);
-		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
-			cpu, policy->min, policy->max, policy->cur, *setspeed);
-
-		if (policy->max < *setspeed)
-			__cpufreq_driver_target(policy, policy->max,
-						CPUFREQ_RELATION_H);
-		else if (policy->min > *setspeed)
-			__cpufreq_driver_target(policy, policy->min,
-						CPUFREQ_RELATION_L);
-		else
-			__cpufreq_driver_target(policy, *setspeed,
-						CPUFREQ_RELATION_L);
-		mutex_unlock(&userspace_mutex);
-		break;
-	}
-	return rc;
+	mutex_lock(&userspace_mutex);
+	per_cpu(cpu_is_managed, policy->cpu) = 1;
+	*setspeed = policy->cur;
+	mutex_unlock(&userspace_mutex);
+	return 0;
+}
+
+static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
+{
+	unsigned int *setspeed = policy->governor_data;
+
+	pr_debug("managing cpu %u stopped\n", policy->cpu);
+
+	mutex_lock(&userspace_mutex);
+	per_cpu(cpu_is_managed, policy->cpu) = 0;
+	*setspeed = 0;
+	mutex_unlock(&userspace_mutex);
+}
+
+static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
+{
+	unsigned int *setspeed = policy->governor_data;
+
+	mutex_lock(&userspace_mutex);
+
+	pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+		 policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
+
+	if (policy->max < *setspeed)
+		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+	else if (policy->min > *setspeed)
+		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+	else
+		__cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+
+	mutex_unlock(&userspace_mutex);
 }
 
 static struct cpufreq_governor cpufreq_gov_userspace = {
 	.name		= "userspace",
-	.governor	= cpufreq_governor_userspace,
+	.init		= cpufreq_userspace_policy_init,
+	.exit		= cpufreq_userspace_policy_exit,
+	.start		= cpufreq_userspace_policy_start,
+	.stop		= cpufreq_userspace_policy_stop,
+	.limits		= cpufreq_userspace_policy_limits,
 	.store_setspeed	= cpufreq_set,
 	.show_setspeed	= show_speed,
 	.owner		= THIS_MODULE,
drivers/cpufreq/davinci-cpufreq.c
@@ -38,26 +38,6 @@ struct davinci_cpufreq {
 };
 static struct davinci_cpufreq cpufreq;
 
-static int davinci_verify_speed(struct cpufreq_policy *policy)
-{
-	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
-	struct cpufreq_frequency_table *freq_table = pdata->freq_table;
-	struct clk *armclk = cpufreq.armclk;
-
-	if (freq_table)
-		return cpufreq_frequency_table_verify(policy, freq_table);
-
-	if (policy->cpu)
-		return -EINVAL;
-
-	cpufreq_verify_within_cpu_limits(policy);
-	policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
-	policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-						policy->cpuinfo.max_freq);
-	return 0;
-}
-
 static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
 {
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -121,7 +101,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
 static struct cpufreq_driver davinci_driver = {
 	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-	.verify		= davinci_verify_speed,
+	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= davinci_target,
 	.get		= cpufreq_generic_get,
 	.init		= davinci_cpu_init,
drivers/cpufreq/freq_table.c
@@ -63,8 +63,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 	else
 		return 0;
 }
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
-
 
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 				   struct cpufreq_frequency_table *table)
@@ -108,20 +106,16 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
  */
 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
 {
-	struct cpufreq_frequency_table *table =
-		cpufreq_frequency_get_table(policy->cpu);
-	if (!table)
+	if (!policy->freq_table)
 		return -ENODEV;
 
-	return cpufreq_frequency_table_verify(policy, table);
+	return cpufreq_frequency_table_verify(policy, policy->freq_table);
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-				   struct cpufreq_frequency_table *table,
 				   unsigned int target_freq,
-				   unsigned int relation,
-				   unsigned int *index)
+				   unsigned int relation)
 {
 	struct cpufreq_frequency_table optimal = {
 		.driver_data = ~0,
@@ -132,7 +126,9 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 		.frequency = 0,
 	};
 	struct cpufreq_frequency_table *pos;
+	struct cpufreq_frequency_table *table = policy->freq_table;
 	unsigned int freq, diff, i = 0;
+	int index;
 
 	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
 		 target_freq, relation, policy->cpu);
@@ -196,25 +192,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 		}
 	}
 	if (optimal.driver_data > i) {
-		if (suboptimal.driver_data > i)
-			return -EINVAL;
-		*index = suboptimal.driver_data;
-	} else
-		*index = optimal.driver_data;
+		if (suboptimal.driver_data > i) {
+			WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+			return 0;
+		}
 
-	pr_debug("target index is %u, freq is:%u kHz\n", *index,
-		 table[*index].frequency);
+		index = suboptimal.driver_data;
+	} else
+		index = optimal.driver_data;
 
-	return 0;
+	pr_debug("target index is %u, freq is:%u kHz\n", index,
+		 table[index].frequency);
+
+	return index;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
 
 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
 		unsigned int freq)
 {
-	struct cpufreq_frequency_table *pos, *table;
+	struct cpufreq_frequency_table *pos, *table = policy->freq_table;
 
-	table = cpufreq_frequency_get_table(policy->cpu);
 	if (unlikely(!table)) {
 		pr_debug("%s: Unable to find frequency table\n", __func__);
 		return -ENOENT;
drivers/cpufreq/intel_pstate.c
浏览文件 @
5d1191ab
...
...
@@ -35,6 +35,7 @@
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#define ATOM_RATIOS 0x66a
#define ATOM_VIDS 0x66b
...
...
@@ -281,9 +282,9 @@ struct cpu_defaults {
static
inline
int32_t
get_target_pstate_use_performance
(
struct
cpudata
*
cpu
);
static
inline
int32_t
get_target_pstate_use_cpu_load
(
struct
cpudata
*
cpu
);
static
struct
pstate_adjust_policy
pid_params
;
static
struct
pstate_funcs
pstate_funcs
;
static
int
hwp_active
;
static
struct
pstate_adjust_policy
pid_params
__read_mostly
;
static
struct
pstate_funcs
pstate_funcs
__read_mostly
;
static
int
hwp_active
__read_mostly
;
#ifdef CONFIG_ACPI
static
bool
acpi_ppc
;
...
...
@@ -1091,6 +1092,26 @@ static struct cpu_defaults knl_params = {
},
};
static
struct
cpu_defaults
bxt_params
=
{
.
pid_policy
=
{
.
sample_rate_ms
=
10
,
.
deadband
=
0
,
.
setpoint
=
60
,
.
p_gain_pct
=
14
,
.
d_gain_pct
=
0
,
.
i_gain_pct
=
4
,
},
.
funcs
=
{
.
get_max
=
core_get_max_pstate
,
.
get_max_physical
=
core_get_max_pstate_physical
,
.
get_min
=
core_get_min_pstate
,
.
get_turbo
=
core_get_turbo_pstate
,
.
get_scaling
=
core_get_scaling
,
.
get_val
=
core_get_val
,
.
get_target_pstate
=
get_target_pstate_use_cpu_load
,
},
};
static
void
intel_pstate_get_min_max
(
struct
cpudata
*
cpu
,
int
*
min
,
int
*
max
)
{
int
max_perf
=
cpu
->
pstate
.
turbo_pstate
;
...
...
@@ -1334,29 +1355,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 			(unsigned long)&policy }

 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-	ICPU(0x2a, core_params),
-	ICPU(0x2d, core_params),
-	ICPU(0x37, silvermont_params),
-	ICPU(0x3a, core_params),
-	ICPU(0x3c, core_params),
-	ICPU(0x3d, core_params),
-	ICPU(0x3e, core_params),
-	ICPU(0x3f, core_params),
-	ICPU(0x45, core_params),
-	ICPU(0x46, core_params),
-	ICPU(0x47, core_params),
-	ICPU(0x4c, airmont_params),
-	ICPU(0x4e, core_params),
-	ICPU(0x4f, core_params),
-	ICPU(0x5e, core_params),
-	ICPU(0x56, core_params),
-	ICPU(0x57, knl_params),
+	ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
+	ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
+	ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
+	ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
+	ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params),
+	ICPU(INTEL_FAM6_HASWELL_X, core_params),
+	ICPU(INTEL_FAM6_HASWELL_ULT, core_params),
+	ICPU(INTEL_FAM6_HASWELL_GT3E, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params),
+	ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params),
+	ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+	ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

-static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
-	ICPU(0x56, core_params),
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
 	{}
 };
...
...
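The table above drops raw model numbers (0x2a, 0x3a, ...) in favour of the symbolic INTEL_FAM6_* names from <asm/intel-family.h>; matching still goes through the ordinary x86_cpu_id machinery. A hedged, stand-alone sketch of the same pattern in a hypothetical module (all example_* names are illustrative only):

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

/* Match family-6 Skylake desktop parts by symbolic name instead of 0x5e. */
static const struct x86_cpu_id example_ids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, example_ids);

static int __init example_init(void)
{
	if (!x86_match_cpu(example_ids))
		return -ENODEV;

	pr_info("example: matching CPU found\n");
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
```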
@@ -1575,12 +1597,12 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.name		= "intel_pstate",
 };

-static int __initdata no_load;
-static int __initdata no_hwp;
-static int __initdata hwp_only;
-static unsigned int force_load;
+static int no_load __initdata;
+static int no_hwp __initdata;
+static int hwp_only __initdata;
+static unsigned int force_load __initdata;

-static int intel_pstate_msrs_not_valid(void)
+static int __init intel_pstate_msrs_not_valid(void)
 {
 	if (!pstate_funcs.get_max() ||
 	    !pstate_funcs.get_min() ||
...
...
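The annotations being rearranged here (__initdata after the declarator, __init on setup-only helpers) let the kernel discard this code and data once boot-time initialisation is finished. A small, purely illustrative sketch of the same pattern (names are hypothetical):

```c
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

/* Only consulted while probing at boot; freed together with all other
 * __init code and data once initialisation completes. */
static unsigned int example_disable __initdata;

static int __init example_check_platform(void)
{
	return example_disable ? -ENODEV : 0;
}

static int __init example_driver_init(void)
{
	return example_check_platform();
}
module_init(example_driver_init);

MODULE_LICENSE("GPL");
```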
@@ -1590,7 +1612,7 @@ static int intel_pstate_msrs_not_valid(void)
 	return 0;
 }

-static void copy_pid_params(struct pstate_adjust_policy *policy)
+static void __init copy_pid_params(struct pstate_adjust_policy *policy)
 {
 	pid_params.sample_rate_ms = policy->sample_rate_ms;
 	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
...
...
@@ -1601,7 +1623,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
 	pid_params.setpoint = policy->setpoint;
 }

-static void copy_cpu_funcs(struct pstate_funcs *funcs)
+static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max   = funcs->get_max;
 	pstate_funcs.get_max_physical = funcs->get_max_physical;
...
...
@@ -1616,7 +1638,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)

 #ifdef CONFIG_ACPI

-static bool intel_pstate_no_acpi_pss(void)
+static bool __init intel_pstate_no_acpi_pss(void)
 {
 	int i;
...
...
@@ -1645,7 +1667,7 @@ static bool intel_pstate_no_acpi_pss(void)
 	return true;
 }

-static bool intel_pstate_has_acpi_ppc(void)
+static bool __init intel_pstate_has_acpi_ppc(void)
 {
 	int i;
...
...
@@ -1673,7 +1695,7 @@ struct hw_vendor_info {
 };

 /* Hardware vendor-specific info that has its own power management modes */
-static struct hw_vendor_info vendor_info[] = {
+static struct hw_vendor_info vendor_info[] __initdata = {
 	{1, "HP    ", "ProLiant", PSS},
 	{1, "ORACLE", "X4-2    ", PPC},
 	{1, "ORACLE", "X4-2L   ", PPC},
...
...
@@ -1692,7 +1714,7 @@ static struct hw_vendor_info vendor_info[] = {
 	{0, "", ""},
 };

-static bool intel_pstate_platform_pwr_mgmt_exists(void)
+static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 {
 	struct acpi_table_header hdr;
 	struct hw_vendor_info *v_info;
...
...
drivers/cpufreq/mvebu-cpufreq.c (view file @ 5d1191ab)
...
...
@@ -70,7 +70,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
 			continue;
 		}

-		clk = clk_get(cpu_dev, 0);
+		clk = clk_get(cpu_dev, NULL);
 		if (IS_ERR(clk)) {
 			pr_err("Cannot get clock for CPU %d\n", cpu);
 			return PTR_ERR(clk);
...
...
drivers/cpufreq/powernv-cpufreq.c (view file @ 5d1191ab)
...
...
@@ -760,9 +760,8 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
 		struct cpufreq_policy policy;

 		cpufreq_get_policy(&policy, cpu);
-		cpufreq_frequency_table_target(&policy, policy.freq_table,
-					       policy.cur,
-					       CPUFREQ_RELATION_C, &index);
+		index = cpufreq_frequency_table_target(&policy, policy.cur,
+						       CPUFREQ_RELATION_C);
 		powernv_cpufreq_target_index(&policy, index);
 		cpumask_andnot(&mask, &mask, policy.cpus);
 	}
...
...
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c (view file @ 5d1191ab)
...
...
@@ -94,7 +94,7 @@ static int pmi_notifier(struct notifier_block *nb,
 				       unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	struct cpufreq_frequency_table *cbe_freqs;
+	struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
 	u8 node;

 	/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
...
...
@@ -103,7 +103,6 @@ static int pmi_notifier(struct notifier_block *nb,
 	if (event == CPUFREQ_START)
 		return 0;

-	cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
 	node = cbe_cpu_to_node(policy->cpu);

 	pr_debug("got notified, event=%lu, node=%u\n", event, node);
...
...
drivers/cpufreq/s3c24xx-cpufreq.c (view file @ 5d1191ab)
...
...
@@ -293,12 +293,8 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
 		     __func__, policy, target_freq, relation);

 	if (ftab) {
-		if (cpufreq_frequency_table_target(policy, ftab,
-						   target_freq, relation,
-						   &index)) {
-			s3c_freq_dbg("%s: table failed\n", __func__);
-			return -EINVAL;
-		}
+		index = cpufreq_frequency_table_target(policy, target_freq,
+						       relation);

 		s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
 			     target_freq, index, ftab[index].frequency);
...
...
@@ -315,7 +311,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
 		pll = NULL;
 	} else {
 		struct cpufreq_policy tmp_policy;
-		int ret;

 		/* we keep the cpu pll table in Hz, to ensure we get an
 		 * accurate value for the PLL output. */
...
...
@@ -323,20 +318,14 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
 		tmp_policy.min = policy->min * 1000;
 		tmp_policy.max = policy->max * 1000;
 		tmp_policy.cpu = policy->cpu;
+		tmp_policy.freq_table = pll_reg;

-		/* cpufreq_frequency_table_target uses a pointer to 'index'
-		 * which is the number of the table entry, not the value of
+		/* cpufreq_frequency_table_target returns the index
+		 * of the table entry, not the value of
 		 * the table entry's index field. */

-		ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
-						     target_freq, relation,
-						     &index);
-		if (ret < 0) {
-			pr_err("%s: no PLL available\n", __func__);
-			goto err_notpossible;
-		}
-
+		index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
+						       relation);
 		pll = pll_reg + index;

 		s3c_freq_dbg("%s: target %u => %u\n",
...
...
@@ -346,10 +335,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
 	}

 	return s3c_cpufreq_settarget(policy, target_freq, pll);
-
- err_notpossible:
-	pr_err("no compatible settings for %d\n", target_freq);
-	return -EINVAL;
 }

 struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
...
...
@@ -571,11 +556,7 @@ static int s3c_cpufreq_build_freq(void)
 {
 	int size, ret;

-	if (!cpu_cur.info->calc_freqtable)
-		return -EINVAL;
-
 	kfree(ftab);
 	ftab = NULL;

 	size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
 	size++;
...
...
drivers/cpufreq/s5pv210-cpufreq.c (view file @ 5d1191ab)
...
...
@@ -246,12 +246,8 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
 	new_freq = s5pv210_freq_table[index].frequency;

 	/* Finding current running level index */
-	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
-					   old_freq, CPUFREQ_RELATION_H,
-					   &priv_index)) {
-		ret = -EINVAL;
-		goto exit;
-	}
+	priv_index = cpufreq_frequency_table_target(policy, old_freq,
+						    CPUFREQ_RELATION_H);

 	arm_volt = dvs_conf[index].arm_volt;
 	int_volt = dvs_conf[index].int_volt;
...
...
drivers/idle/intel_idle.c (view file @ 5d1191ab)
...
...
@@ -62,6 +62,7 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
...
...
@@ -1020,38 +1021,38 @@ static const struct idle_cpu idle_cpu_bxt = {
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-	ICPU(0x1a, idle_cpu_nehalem),
-	ICPU(0x1e, idle_cpu_nehalem),
-	ICPU(0x1f, idle_cpu_nehalem),
-	ICPU(0x25, idle_cpu_nehalem),
-	ICPU(0x2c, idle_cpu_nehalem),
-	ICPU(0x2e, idle_cpu_nehalem),
-	ICPU(0x1c, idle_cpu_atom),
-	ICPU(0x26, idle_cpu_lincroft),
-	ICPU(0x2f, idle_cpu_nehalem),
-	ICPU(0x2a, idle_cpu_snb),
-	ICPU(0x2d, idle_cpu_snb),
-	ICPU(0x36, idle_cpu_atom),
-	ICPU(0x37, idle_cpu_byt),
-	ICPU(0x4c, idle_cpu_cht),
-	ICPU(0x3a, idle_cpu_ivb),
-	ICPU(0x3e, idle_cpu_ivt),
-	ICPU(0x3c, idle_cpu_hsw),
-	ICPU(0x3f, idle_cpu_hsw),
-	ICPU(0x45, idle_cpu_hsw),
-	ICPU(0x46, idle_cpu_hsw),
-	ICPU(0x4d, idle_cpu_avn),
-	ICPU(0x3d, idle_cpu_bdw),
-	ICPU(0x47, idle_cpu_bdw),
-	ICPU(0x4f, idle_cpu_bdw),
-	ICPU(0x56, idle_cpu_bdw),
-	ICPU(0x4e, idle_cpu_skl),
-	ICPU(0x5e, idle_cpu_skl),
-	ICPU(0x8e, idle_cpu_skl),
-	ICPU(0x9e, idle_cpu_skl),
-	ICPU(0x55, idle_cpu_skx),
-	ICPU(0x57, idle_cpu_knl),
-	ICPU(0x5c, idle_cpu_bxt),
+	ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_WESTMERE2, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
+	ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
+	ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
+	ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
+	ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
+	ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+	ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
+	ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
+	ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
+	ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw),
+	ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
+	ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
+	ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+	ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
+	ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
+	ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
+	ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw),
+	ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl),
+	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl),
+	ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl),
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
+	ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
+	ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
...
...
@@ -1261,13 +1262,13 @@ static void intel_idle_state_table_update(void)
 {
 	switch (boot_cpu_data.x86_model) {

-	case 0x3e: /* IVT */
+	case INTEL_FAM6_IVYBRIDGE_X:
 		ivt_idle_state_table_update();
 		break;
-	case 0x5c: /* BXT */
+	case INTEL_FAM6_ATOM_GOLDMONT:
 		bxt_idle_state_table_update();
 		break;
-	case 0x5e: /* SKL-H */
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
 		sklh_idle_state_table_update();
 		break;
 	}
...
...
drivers/mmc/host/sdhci-acpi.c (view file @ 5d1191ab)
...
...
@@ -43,6 +43,7 @@
 #ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 #endif
...
...
@@ -126,7 +127,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
 static bool sdhci_acpi_byt(void)
 {
 	static const struct x86_cpu_id byt[] = {
-		{ X86_VENDOR_INTEL, 6, 0x37 },
+		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
 		{}
 	};
...
...
drivers/platform/x86/intel_telemetry_debugfs.c (view file @ 5d1191ab)
...
...
@@ -32,6 +32,7 @@
 #include <linux/suspend.h>

 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/intel_pmc_ipc.h>
 #include <asm/intel_punit_ipc.h>
 #include <asm/intel_telemetry.h>
...
...
@@ -331,7 +332,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
 };

 static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
-	TELEM_DEBUGFS_CPU(0x5c, telem_apl_debugfs_conf),
+	TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
 	{}
 };
...
...
drivers/platform/x86/intel_telemetry_pltdrv.c (view file @ 5d1191ab)
...
...
@@ -28,6 +28,7 @@
 #include <linux/platform_device.h>

 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/intel_pmc_ipc.h>
 #include <asm/intel_punit_ipc.h>
 #include <asm/intel_telemetry.h>
...
...
@@ -163,7 +164,7 @@ static struct telemetry_plt_config telem_apl_config = {
 };

 static const struct x86_cpu_id telemetry_cpu_ids[] = {
-	TELEM_CPU(0x5c, telem_apl_config),
+	TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
 	{}
 };
...
...
drivers/powercap/intel_rapl.c (view file @ 5d1191ab)
...
...
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>

 /* Local defines */
 #define MSR_PLATFORM_POWER_LIMIT	0x0000065C
...
...
@@ -1096,27 +1097,34 @@ static const struct rapl_defaults rapl_defaults_cht = {
 		}

 static const struct x86_cpu_id rapl_ids[] __initconst = {
-	RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
-	RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
-	RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
-	RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
-	RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
-	RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
-	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
-	RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
-	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
-	RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
-	RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
-	RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
-	RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
-	RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
-	RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
-	RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
-	RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
-	RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
-	RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
-	RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
-	RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
+	RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X, rapl_defaults_core),
+
+	RAPL_CPU(INTEL_FAM6_IVYBRIDGE, rapl_defaults_core),
+
+	RAPL_CPU(INTEL_FAM6_HASWELL_CORE, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_HASWELL_ULT, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_HASWELL_GT3E, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_HASWELL_X, rapl_defaults_hsw_server),
+
+	RAPL_CPU(INTEL_FAM6_BROADWELL_CORE, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_BROADWELL_X, rapl_defaults_hsw_server),
+
+	RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_SKYLAKE_X, rapl_defaults_hsw_server),
+	RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
+
+	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
+	RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
+	RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD1, rapl_defaults_tng),
+	RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD2, rapl_defaults_ann),
+	RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+
+	RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
...
...
drivers/thermal/cpu_cooling.c (view file @ 5d1191ab)
...
...
@@ -787,22 +787,34 @@ __cpufreq_cooling_register(struct device_node *np,
 			const struct cpumask *clip_cpus, u32 capacitance,
 			get_static_t plat_static_func)
 {
+	struct cpufreq_policy *policy;
 	struct thermal_cooling_device *cool_dev;
 	struct cpufreq_cooling_device *cpufreq_dev;
 	char dev_name[THERMAL_NAME_LENGTH];
 	struct cpufreq_frequency_table *pos, *table;
+	struct cpumask temp_mask;
 	unsigned int freq, i, num_cpus;
 	int ret;

-	table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+	cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
+	policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
+	if (!policy) {
+		pr_debug("%s: CPUFreq policy not found\n", __func__);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	table = policy->freq_table;
 	if (!table) {
 		pr_debug("%s: CPUFreq table not found\n", __func__);
-		return ERR_PTR(-EPROBE_DEFER);
+		cool_dev = ERR_PTR(-ENODEV);
+		goto put_policy;
 	}

 	cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
-	if (!cpufreq_dev)
-		return ERR_PTR(-ENOMEM);
+	if (!cpufreq_dev) {
+		cool_dev = ERR_PTR(-ENOMEM);
+		goto put_policy;
+	}

 	num_cpus = cpumask_weight(clip_cpus);
 	cpufreq_dev->time_in_idle = kcalloc(num_cpus,
...
...
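With this change the cooling device no longer calls cpufreq_frequency_get_table(); it takes a reference on the CPU's cpufreq policy, reads policy->freq_table, and must drop the reference on every exit path (hence the new put_policy label below). A hedged sketch of that get/use/put pattern (the example_* name is illustrative, not from the patch):

```c
#include <linux/cpufreq.h>
#include <linux/errno.h>

/* Count the valid entries of a CPU's frequency table while holding a
 * reference on its policy, and always release the reference afterwards. */
static int example_count_table_entries(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *pos;
	int count = 0;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -EPROBE_DEFER;	/* cpufreq may not be up yet */

	if (policy->freq_table)
		cpufreq_for_each_valid_entry(pos, policy->freq_table)
			count++;

	cpufreq_cpu_put(policy);
	return count;
}
```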
@@ -892,7 +904,7 @@ __cpufreq_cooling_register(struct device_node *np,
 					  CPUFREQ_POLICY_NOTIFIER);
 	mutex_unlock(&cooling_cpufreq_lock);

-	return cool_dev;
+	goto put_policy;

 remove_idr:
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
...
...
@@ -906,6 +918,8 @@ __cpufreq_cooling_register(struct device_node *np,
 	kfree(cpufreq_dev->time_in_idle);
 free_cdev:
 	kfree(cpufreq_dev);
+put_policy:
+	cpufreq_cpu_put(policy);

 	return cool_dev;
 }
...
...
drivers/thermal/intel_soc_dts_thermal.c (view file @ 5d1191ab)
...
...
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "intel_soc_dts_iosf.h"

 #define CRITICAL_OFFSET_FROM_TJ_MAX	5000
...
...
@@ -42,7 +43,8 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 }

 static const struct x86_cpu_id soc_thermal_ids[] = {
-	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+		BYT_SOC_DTS_APIC_IRQ},
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
...
...
include/linux/cpufreq.h (view file @ 5d1191ab)
...
...
@@ -185,6 +185,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
 static inline void disable_cpufreq(void) { }
 #endif

+#ifdef CONFIG_CPU_FREQ_STAT
+void cpufreq_stats_create_table(struct cpufreq_policy *policy);
+void cpufreq_stats_free_table(struct cpufreq_policy *policy);
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+				     unsigned int new_freq);
+#else
+static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
+static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
+static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+						   unsigned int new_freq) { }
+#endif /* CONFIG_CPU_FREQ_STAT */
+
 /*********************************************************************
  *                      CPUFREQ DRIVER INTERFACE                     *
  *********************************************************************/
...
...
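These declarations (with their !CONFIG_CPU_FREQ_STAT stubs) let the cpufreq core call into the statistics code directly instead of going through a transition notifier. Roughly how a caller would use them over a policy's lifetime — a sketch, not the exact core code:

```c
#include <linux/cpufreq.h>

/* Sketch: hook points over a policy's lifetime.  Each call compiles away to
 * an empty inline when CONFIG_CPU_FREQ_STAT is not set. */
static void example_policy_created(struct cpufreq_policy *policy)
{
	cpufreq_stats_create_table(policy);
}

static void example_transition_done(struct cpufreq_policy *policy,
				    unsigned int new_freq)
{
	cpufreq_stats_record_transition(policy, new_freq);
}

static void example_policy_removed(struct cpufreq_policy *policy)
{
	cpufreq_stats_free_table(policy);
}
```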
@@ -455,18 +467,13 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
 #define MIN_LATENCY_MULTIPLIER		(20)
 #define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)

-/* Governor Events */
-#define CPUFREQ_GOV_START	1
-#define CPUFREQ_GOV_STOP	2
-#define CPUFREQ_GOV_LIMITS	3
-#define CPUFREQ_GOV_POLICY_INIT	4
-#define CPUFREQ_GOV_POLICY_EXIT	5
-
 struct cpufreq_governor {
 	char	name[CPUFREQ_NAME_LEN];
-	int	initialized;
-	int	(*governor)	(struct cpufreq_policy *policy,
-				 unsigned int event);
+	int	(*init)(struct cpufreq_policy *policy);
+	void	(*exit)(struct cpufreq_policy *policy);
+	int	(*start)(struct cpufreq_policy *policy);
+	void	(*stop)(struct cpufreq_policy *policy);
+	void	(*limits)(struct cpufreq_policy *policy);
 	ssize_t	(*show_setspeed)	(struct cpufreq_policy *policy,
 					 char *buf);
 	int	(*store_setspeed)	(struct cpufreq_policy *policy,
...
...
@@ -493,6 +500,14 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 struct cpufreq_governor *cpufreq_default_governor(void);
 struct cpufreq_governor *cpufreq_fallback_governor(void);

+static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
+{
+	if (policy->max < policy->cur)
+		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+	else if (policy->min > policy->cur)
+		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+}
+
 /* Governor attribute set */
 struct gov_attr_set {
 	struct kobject kobj;
...
...
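Taken together with the struct cpufreq_governor change above, a governor now provides separate init/exit/start/stop/limits callbacks instead of multiplexing on CPUFREQ_GOV_* events, and its ->limits() hook can simply clamp the running frequency with cpufreq_policy_apply_limits(). A skeleton of a governor against the new layout (placeholder bodies, illustrative names, not code from this commit):

```c
#include <linux/cpufreq.h>
#include <linux/module.h>

static int example_gov_init(struct cpufreq_policy *policy)
{
	/* allocate per-policy state and hang it off policy->governor_data */
	return 0;
}

static void example_gov_exit(struct cpufreq_policy *policy)
{
	/* free whatever init() allocated */
}

static int example_gov_start(struct cpufreq_policy *policy)
{
	/* begin sampling / install update hooks */
	return 0;
}

static void example_gov_stop(struct cpufreq_policy *policy)
{
	/* stop sampling and wait for in-flight work */
}

static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* policy->min/max changed: clamp the current frequency */
	cpufreq_policy_apply_limits(policy);
}

static struct cpufreq_governor example_gov = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_gov_init,
	.exit	= example_gov_exit,
	.start	= example_gov_start,
	.stop	= example_gov_stop,
	.limits	= example_gov_limits,
};
```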
@@ -583,10 +598,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);

 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-				   struct cpufreq_frequency_table *table,
 				   unsigned int target_freq,
-				   unsigned int relation,
-				   unsigned int *index);
+				   unsigned int relation);
 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
 		unsigned int freq);
...
...
@@ -617,8 +630,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
 	return false;
 }
 #endif

-/* the following funtion is for cpufreq core use only */
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
...
...
kernel/sched/cpufreq_schedutil.c (view file @ 5d1191ab)
...
...
@@ -394,7 +394,7 @@ static int sugov_init(struct cpufreq_policy *policy)
 	return ret;
 }

-static int sugov_exit(struct cpufreq_policy *policy)
+static void sugov_exit(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
 	struct sugov_tunables *tunables = sg_policy->tunables;
...
...
@@ -412,7 +412,6 @@ static int sugov_exit(struct cpufreq_policy *policy)
 	mutex_unlock(&global_tunables_lock);

 	sugov_policy_free(sg_policy);
-	return 0;
 }

 static int sugov_start(struct cpufreq_policy *policy)
...
...
@@ -444,7 +443,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	return 0;
 }

-static int sugov_stop(struct cpufreq_policy *policy)
+static void sugov_stop(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
 	unsigned int cpu;
...
...
@@ -456,53 +455,29 @@ static int sugov_stop(struct cpufreq_policy *policy)
 	irq_work_sync(&sg_policy->irq_work);
 	cancel_work_sync(&sg_policy->work);
-
-	return 0;
 }

-static int sugov_limits(struct cpufreq_policy *policy)
+static void sugov_limits(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;

 	if (!policy->fast_switch_enabled) {
 		mutex_lock(&sg_policy->work_lock);
-
-		if (policy->max < policy->cur)
-			__cpufreq_driver_target(policy, policy->max,
-						CPUFREQ_RELATION_H);
-		else if (policy->min > policy->cur)
-			__cpufreq_driver_target(policy, policy->min,
-						CPUFREQ_RELATION_L);
-
+		cpufreq_policy_apply_limits(policy);
 		mutex_unlock(&sg_policy->work_lock);
 	}

 	sg_policy->need_freq_update = true;
-
-	return 0;
-}
-
-int sugov_governor(struct cpufreq_policy *policy, unsigned int event)
-{
-	if (event == CPUFREQ_GOV_POLICY_INIT) {
-		return sugov_init(policy);
-	} else if (policy->governor_data) {
-		switch (event) {
-		case CPUFREQ_GOV_POLICY_EXIT:
-			return sugov_exit(policy);
-		case CPUFREQ_GOV_START:
-			return sugov_start(policy);
-		case CPUFREQ_GOV_STOP:
-			return sugov_stop(policy);
-		case CPUFREQ_GOV_LIMITS:
-			return sugov_limits(policy);
-		}
-	}
-	return -EINVAL;
 }

 static struct cpufreq_governor schedutil_gov = {
 	.name = "schedutil",
-	.governor = sugov_governor,
 	.owner = THIS_MODULE,
+	.init = sugov_init,
+	.exit = sugov_exit,
+	.start = sugov_start,
+	.stop = sugov_stop,
+	.limits = sugov_limits,
 };

 static int __init sugov_module_init(void)
...
...
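After this conversion schedutil fills in plain callbacks and the sugov_governor() event multiplexer disappears; registration itself is unchanged. A minimal sketch of how such a governor is registered and unregistered from module init/exit, mirroring what sugov_module_init() does (example_gov is assumed to be a governor filled in with the new callbacks, as sketched earlier; the names are illustrative):

```c
#include <linux/cpufreq.h>
#include <linux/module.h>

/* Assumes a `struct cpufreq_governor example_gov` defined elsewhere with the
 * new init/exit/start/stop/limits callbacks. */
extern struct cpufreq_governor example_gov;

static int __init example_gov_module_init(void)
{
	return cpufreq_register_governor(&example_gov);
}

static void __exit example_gov_module_exit(void)
{
	cpufreq_unregister_governor(&example_gov);
}

module_init(example_gov_module_init);
module_exit(example_gov_module_exit);

MODULE_LICENSE("GPL");
```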