Commit d68bddb7 authored by Heiko Carstens, committed by Martin Schwidefsky

[S390] topology: increase poll frequency if change is anticipated

Increase cpu topology change poll frequency if a change is anticipated.
Otherwise a user might be a bit confused at having to wait up to a minute
to see a change that should be visible immediately.
However, there is no guarantee that the change will happen during the
time frame in which the poll frequency is increased.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent c5328901
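The change below replaces the fixed 60-second re-arm of the topology timer with a credit-based heuristic: topology_expect_change() tops an atomic counter up by 60 (capped), and each timer run consumes one credit and re-arms after HZ / 10 instead of HZ * 60 while credits remain. As a rough illustration only, here is a minimal standalone user-space sketch of that heuristic in C11, with hypothetical names (next_poll_interval, expect_change) and plain stdatomic counters standing in for the kernel's atomic_t, timer, and MACHINE_HAS_TOPOLOGY handling:

#include <stdatomic.h>
#include <stdio.h>

#define FAST_POLL_MS    100     /* stands in for HZ / 10 in the patch */
#define SLOW_POLL_MS  60000     /* stands in for HZ * 60 in the patch */
#define POLL_CREDITS     60     /* credits added per anticipated change */

static atomic_int topology_poll;        /* fast-poll credits, starts at 0 */

/* Consume one credit if any are left and return the next poll interval. */
static int next_poll_interval(void)
{
	int old = atomic_load(&topology_poll);

	while (old > 0) {
		/* On CAS failure 'old' is refreshed and the loop re-checks it. */
		if (atomic_compare_exchange_weak(&topology_poll, &old, old - 1))
			return FAST_POLL_MS;
	}
	return SLOW_POLL_MS;
}

/* Called when a topology change is anticipated: top up the credits. */
static void expect_change(void)
{
	/* Racy check, but it is only a heuristic, as the patch notes. */
	if (atomic_load(&topology_poll) > POLL_CREDITS)
		return;
	atomic_fetch_add(&topology_poll, POLL_CREDITS);
}

int main(void)
{
	printf("idle poll interval: %d ms\n", next_poll_interval());
	expect_change();
	printf("after expect_change(): %d ms\n", next_poll_interval());
	return 0;
}

With 60 credits and a 100 ms fast interval this gives roughly six seconds of fast polling per anticipated change, after which the timer falls back to the slow one-minute rate.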
@@ -35,11 +35,13 @@ int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
 #else /* CONFIG_SCHED_BOOK */
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
 #endif /* CONFIG_SCHED_BOOK */
...
@@ -867,6 +867,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
 				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
@@ -876,6 +877,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 			if (!rc) {
 				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
 				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+				topology_expect_change();
 			}
 		}
 		break;
...
@@ -31,7 +31,6 @@ struct mask_info {
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -297,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll in a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
@@ -379,8 +396,10 @@ static ssize_t dispatching_store(struct sysdev_class *dev,
 	if (cpu_management == val)
 		goto out;
 	rc = topology_set_cpu_management(val);
-	if (!rc)
-		cpu_management = val;
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
 out:
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
@@ -438,7 +457,6 @@ static int __init topology_init(void)
 		topology_update_polarization_simple();
 		goto out;
 	}
-	init_timer_deferrable(&topology_timer);
 	set_topology_timer();
 out:
 	update_cpu_core_map();
...