提交 9fa64d64 编写于 作者: R Rafael J. Wysocki

Merge back intel_pstate fixes for v4.6.

* pm-cpufreq:
  intel_pstate: Avoid extra invocation of intel_pstate_sample()
  intel_pstate: Do not set utilization update hook too early
...@@ -910,7 +910,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) ...@@ -910,7 +910,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
cpu->prev_aperf = aperf; cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf; cpu->prev_mperf = mperf;
cpu->prev_tsc = tsc; cpu->prev_tsc = tsc;
return true; /*
* First time this function is invoked in a given cycle, all of the
* previous sample data fields are equal to zero or stale and they must
* be populated with meaningful numbers for things to work, so assume
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
return !!cpu->last_sample_time;
} }
static inline int32_t get_avg_frequency(struct cpudata *cpu) static inline int32_t get_avg_frequency(struct cpudata *cpu)
...@@ -984,8 +991,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) ...@@ -984,8 +991,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
* enough period of time to adjust our busyness. * enough period of time to adjust our busyness.
*/ */
duration_ns = cpu->sample.time - cpu->last_sample_time; duration_ns = cpu->sample.time - cpu->last_sample_time;
if ((s64)duration_ns > pid_params.sample_rate_ns * 3 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
&& cpu->last_sample_time > 0) {
sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
int_tofp(duration_ns)); int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio); core_busy = mul_fp(core_busy, sample_ratio);
...@@ -1100,10 +1106,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum) ...@@ -1100,10 +1106,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_get_cpu_pstates(cpu); intel_pstate_get_cpu_pstates(cpu);
intel_pstate_busy_pid_reset(cpu); intel_pstate_busy_pid_reset(cpu);
intel_pstate_sample(cpu, 0);
cpu->update_util.func = intel_pstate_update_util; cpu->update_util.func = intel_pstate_update_util;
cpufreq_set_update_util_data(cpunum, &cpu->update_util);
pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
...@@ -1122,18 +1126,33 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) ...@@ -1122,18 +1126,33 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
return get_avg_frequency(cpu); return get_avg_frequency(cpu);
} }
/*
 * Install the scheduler utilization-update callback for one CPU.
 *
 * cpu_num: index into all_cpu_data[] of the CPU whose hook is installed.
 *
 * sample.time is zeroed BEFORE the hook is registered: the first invocation
 * of intel_pstate_update_util() after registration then sees a reset
 * timestamp instead of stale data left over from a previous registration.
 * The ordering of the two statements below is therefore significant.
 */
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
}
/*
 * Remove the utilization-update callback for one CPU and wait until any
 * in-flight invocation of it has completed.
 *
 * cpu: CPU number whose hook is being cleared.
 *
 * synchronize_sched() waits for an RCU-sched grace period after the hook
 * pointer is cleared, so when this function returns no CPU can still be
 * executing the old callback — callers may then safely free or repopulate
 * the associated cpudata. The clear-then-synchronize order is required.
 */
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
cpufreq_set_update_util_data(cpu, NULL);
synchronize_sched();
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy) static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{ {
if (!policy->cpuinfo.max_freq) if (!policy->cpuinfo.max_freq)
return -ENODEV; return -ENODEV;
intel_pstate_clear_update_util_hook(policy->cpu);
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
policy->max >= policy->cpuinfo.max_freq) { policy->max >= policy->cpuinfo.max_freq) {
pr_debug("intel_pstate: set performance\n"); pr_debug("intel_pstate: set performance\n");
limits = &performance_limits; limits = &performance_limits;
if (hwp_active) goto out;
intel_pstate_hwp_set(policy->cpus);
return 0;
} }
pr_debug("intel_pstate: set powersave\n"); pr_debug("intel_pstate: set powersave\n");
...@@ -1163,6 +1182,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) ...@@ -1163,6 +1182,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
int_tofp(100)); int_tofp(100));
out:
intel_pstate_set_update_util_hook(policy->cpu);
if (hwp_active) if (hwp_active)
intel_pstate_hwp_set(policy->cpus); intel_pstate_hwp_set(policy->cpus);
...@@ -1187,8 +1209,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) ...@@ -1187,8 +1209,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
cpufreq_set_update_util_data(cpu_num, NULL); intel_pstate_clear_update_util_hook(cpu_num);
synchronize_sched();
if (hwp_active) if (hwp_active)
return; return;
...@@ -1455,8 +1476,7 @@ static int __init intel_pstate_init(void) ...@@ -1455,8 +1476,7 @@ static int __init intel_pstate_init(void)
get_online_cpus(); get_online_cpus();
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) { if (all_cpu_data[cpu]) {
cpufreq_set_update_util_data(cpu, NULL); intel_pstate_clear_update_util_hook(cpu);
synchronize_sched();
kfree(all_cpu_data[cpu]); kfree(all_cpu_data[cpu]);
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册