Commit 751a03c3 authored by Linus Torvalds

Merge tag 'pm+acpi-3.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki:
 "These three commits fix a recent intel_pstate regression and two old
  bugs that should be fixed in -stable too, one in the ACPI processor
  driver and one in the firmware loader.

  Specifics:

   - One of the recent intel_pstate driver fixes introduced a rounding
     error that on some systems causes the frequency to be stuck at the
     lowest level forever.  Fix from Dirk Brandewie.

   - The firmware_class driver's PM notifier doesn't handle the
     PM_RESTORE_PREPARE event during hibernation image restore and that
     leads to a deadlock on umhelper_sem in __usermodehelper_disable().
     Fix from Sebastian Capella.

   - acpi_processor_set_throttling() abuses set_cpus_allowed_ptr() in a
     nasty way which triggers the WARN_ON_ONCE() in wq_worker_waking_up()
     among other things.  Fix from Lan Tianyu"

* tag 'pm+acpi-3.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / processor: Rework processor throttling with work_on_cpu()
  PM / hibernate: Fix restore hang in freeze_processes()
  intel_pstate: Change busy calculation to use fixed point math.
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -56,6 +56,12 @@ struct throttling_tstate {
 	int target_state;		/* target T-state */
 };
 
+struct acpi_processor_throttling_arg {
+	struct acpi_processor *pr;
+	int target_state;
+	bool force;
+};
+
 #define THROTTLING_PRECHANGE      (1)
 #define THROTTLING_POSTCHANGE     (2)
@@ -1060,16 +1066,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
+static long acpi_processor_throttling_fn(void *data)
+{
+	struct acpi_processor_throttling_arg *arg = data;
+	struct acpi_processor *pr = arg->pr;
+
+	return pr->throttling.acpi_processor_set_throttling(pr,
+			arg->target_state, arg->force);
+}
+
 int acpi_processor_set_throttling(struct acpi_processor *pr,
 				  int state, bool force)
 {
-	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
+	struct acpi_processor_throttling_arg arg;
 	struct throttling_tstate t_state;
-	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1080,14 +1094,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
-		free_cpumask_var(saved_mask);
-		return -ENOMEM;
-	}
-
 	if (cpu_is_offline(pr->id)) {
 		/*
 		 * the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1096,17 +1102,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 		return -ENODEV;
 	}
 
-	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpumask_and(online_throttling_cpus, cpu_online_mask,
-		    p_throttling->shared_cpu_map);
+
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1118,21 +1122,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		/* FIXME: use work_on_cpu() */
-		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-			/* Can't migrate to the pr->id CPU. Exit */
-			ret = -ENODEV;
-			goto exit;
-		}
-		ret = p_throttling->acpi_processor_set_throttling(pr,
-						t_state.target_state, force);
+		arg.pr = pr;
+		arg.target_state = state;
+		arg.force = force;
+		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu(i, online_throttling_cpus) {
+		for_each_cpu_and(i, cpu_online_mask,
+		    p_throttling->shared_cpu_map) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1153,13 +1154,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 						"on CPU %d\n", i));
 				continue;
 			}
 			t_state.cpu = i;
-			/* FIXME: use work_on_cpu() */
-			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
-				continue;
-			ret = match_pr->throttling.
-				acpi_processor_set_throttling(
-				match_pr, t_state.target_state, force);
+			arg.pr = match_pr;
+			arg.target_state = state;
+			arg.force = force;
+			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+					  &arg);
 		}
 	}
 	/*
@@ -1168,17 +1168,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
 	}
-	/* restore the previous state */
-	/* FIXME: use work_on_cpu() */
-	set_cpus_allowed_ptr(current, saved_mask);
-exit:
-	free_cpumask_var(online_throttling_cpus);
-	free_cpumask_var(saved_mask);
+
 	return ret;
 }
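
For reference, here is a minimal, self-contained sketch of the work_on_cpu() pattern the patch switches to; the struct and function names below are illustrative, not part of the commit. work_on_cpu() queues the callback on the target CPU's bound kworker and blocks until it returns, so the caller's CPU affinity is never touched:

#include <linux/workqueue.h>

struct demo_throttle_arg {		/* hypothetical payload */
	int target_state;
};

/* Executed on the target CPU by that CPU's bound kworker. */
static long demo_throttle_fn(void *data)
{
	struct demo_throttle_arg *arg = data;

	/* ... per-CPU MSR/ACPI register accesses would go here ... */
	return arg->target_state >= 0 ? 0 : -EINVAL;
}

static long demo_set_state(unsigned int cpu, int state)
{
	struct demo_throttle_arg arg = { .target_state = state };

	/*
	 * Unlike set_cpus_allowed_ptr(current, cpumask_of(cpu)), this
	 * never rebinds the caller, so it cannot corrupt a kworker's
	 * affinity, which is what triggered the WARN_ON_ONCE() in
	 * wq_worker_waking_up().
	 */
	return work_on_cpu(cpu, demo_throttle_fn, &arg);
}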
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1580,6 +1580,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
 	switch (mode) {
 	case PM_HIBERNATION_PREPARE:
 	case PM_SUSPEND_PREPARE:
+	case PM_RESTORE_PREPARE:
 		kill_requests_without_uevent();
 		device_cache_fw_images();
 		break;
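
The one-line fix adds PM_RESTORE_PREPARE, the notification sent in the boot kernel before tasks are frozen to load a hibernation image, so the firmware loader quiesces usermode-helper requests on that path too and __usermodehelper_disable() no longer deadlocks on umhelper_sem. A minimal sketch of a PM notifier covering the same events (names are illustrative, not from this commit):

#include <linux/notifier.h>
#include <linux/suspend.h>

static int demo_pm_notify(struct notifier_block *nb,
			  unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:	/* boot kernel, before image restore */
		/* stop issuing usermode-helper requests here */
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/* resume normal request handling */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notify,
};

/* register_pm_notifier(&demo_pm_nb) from an init path. */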
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -39,9 +39,10 @@
 #define BYT_TURBO_RATIOS	0x66c
 
-#define FRAC_BITS 8
+#define FRAC_BITS 6
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
+#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
 
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
@@ -556,18 +557,20 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
-	u64 core_pct;
-	u64 c0_pct;
+	int32_t core_pct;
+	int32_t c0_pct;
 
-	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+	core_pct = div_fp(int_tofp((sample->aperf)),
+			int_tofp((sample->mperf)));
+	core_pct = mul_fp(core_pct, int_tofp(100));
+	FP_ROUNDUP(core_pct);
+
+	c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
 
-	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
 	sample->freq = fp_toint(
-		mul_fp(int_tofp(cpu->pstate.max_pstate),
-		       int_tofp(core_pct * 1000)));
+		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
 
-	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
-				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
+	sample->core_pct_busy = mul_fp(core_pct, c0_pct);
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
@@ -579,6 +582,10 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	rdmsrl(MSR_IA32_MPERF, mperf);
 	tsc = native_read_tsc();
 
+	aperf = aperf >> FRAC_BITS;
+	mperf = mperf >> FRAC_BITS;
+	tsc = tsc >> FRAC_BITS;
+
 	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
 	cpu->samples[cpu->sample_ptr].aperf = aperf;
 	cpu->samples[cpu->sample_ptr].mperf = mperf;
@@ -610,7 +617,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
-	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+	return FP_ROUNDUP(core_busy);
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
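
To see why the rounding change matters, here is a standalone userspace rendering of the fixed-point helpers above (assumed sample values; not kernel code, and div_fp() uses plain division instead of div_s64()). With pure truncation, a core running at 99.3% of its unthrottled rate reports 98 after the two fixed-point steps below, which could leave the scaled busy value just under the threshold for stepping up and pin the frequency at the minimum P-state; FP_ROUNDUP() biases the result up by one integer unit. Dropping FRAC_BITS from 8 to 6 and pre-shifting aperf/mperf/tsc by FRAC_BITS keeps the intermediates within the int32_t results:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	/* aperf/mperf ratio of 0.993: the core ran at 99.3% of its
	 * unthrottled rate during the sample interval. */
	int32_t core_pct = div_fp(int_tofp(993), int_tofp(1000));

	core_pct = mul_fp(core_pct, int_tofp(100));
	printf("truncated:  %d%%\n", (int)fp_toint(core_pct));	/* 98 */

	FP_ROUNDUP(core_pct);	/* add 1 << FRAC_BITS: one integer unit */
	printf("rounded up: %d%%\n", (int)fp_toint(core_pct));	/* 99 */
	return 0;
}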