Commit a5acbfbd authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq-governor' into pm-cpufreq

drivers/cpufreq/Kconfig:
@@ -19,6 +19,7 @@ config CPU_FREQ
 if CPU_FREQ

 config CPU_FREQ_GOV_COMMON
+	select IRQ_WORK
 	bool

 config CPU_FREQ_BOOST_SW
...
drivers/cpufreq/amd_freq_sensitivity.c:
@@ -21,7 +21,7 @@
 #include <asm/msr.h>
 #include <asm/cpufeature.h>

-#include "cpufreq_governor.h"
+#include "cpufreq_ondemand.h"

 #define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL	0xc0010080
 #define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE	0xc0010081
@@ -45,10 +45,10 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	long d_actual, d_reference;
 	struct msr actual, reference;
 	struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
-	struct dbs_data *od_data = policy->governor_data;
+	struct policy_dbs_info *policy_dbs = policy->governor_data;
+	struct dbs_data *od_data = policy_dbs->dbs_data;
 	struct od_dbs_tuners *od_tuners = od_data->tuners;
-	struct od_cpu_dbs_info_s *od_info =
-		od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+	struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);

 	if (!od_info->freq_table)
 		return freq_next;
...
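Aside: the new pointer chain in the hunk above is worth spelling out. Below is a minimal illustrative helper (not part of the patch; the name od_tuners_of is made up) that walks from a policy to the on-demand tunables after this rework:

	/* Hypothetical helper showing the pointer chain used above. */
	static struct od_dbs_tuners *od_tuners_of(struct cpufreq_policy *policy)
	{
		/* governor_data now points at the per-policy governor state... */
		struct policy_dbs_info *policy_dbs = policy->governor_data;
		/* ...which carries the (possibly shared) tunable set. */
		struct dbs_data *od_data = policy_dbs->dbs_data;

		return od_data->tuners;
	}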
drivers/cpufreq/cpufreq.c:
@@ -64,7 +64,6 @@ static LIST_HEAD(cpufreq_governor_list);
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
-DEFINE_MUTEX(cpufreq_governor_lock);

 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;
@@ -75,10 +74,8 @@ static inline bool has_target(void)
 }

 /* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
-		unsigned int event);
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
-static void handle_update(struct work_struct *work);

 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -955,30 +952,38 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
 	if (cpumask_test_cpu(cpu, policy->cpus))
 		return 0;

+	down_write(&policy->rwsem);
 	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
 			pr_err("%s: Failed to stop governor\n", __func__);
-			return ret;
+			goto unlock;
 		}
 	}

-	down_write(&policy->rwsem);
 	cpumask_set_cpu(cpu, policy->cpus);
-	up_write(&policy->rwsem);

 	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
 		if (!ret)
-			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+			ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

-		if (ret) {
+		if (ret)
 			pr_err("%s: Failed to start governor\n", __func__);
-			return ret;
-		}
 	}

-	return 0;
+unlock:
+	up_write(&policy->rwsem);
+	return ret;
+}
+
+static void handle_update(struct work_struct *work)
+{
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, update);
+	unsigned int cpu = policy->cpu;
+
+	pr_debug("handle_update for cpu %u called\n", cpu);
+	cpufreq_update_policy(cpu);
 }

 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
@@ -1267,9 +1272,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	return ret;
 }

-static void cpufreq_offline_prepare(unsigned int cpu)
+static void cpufreq_offline(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
+	int ret;

 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1279,13 +1285,13 @@ static void cpufreq_offline_prepare(unsigned int cpu)
 		return;
 	}

+	down_write(&policy->rwsem);
 	if (has_target()) {
-		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret)
 			pr_err("%s: Failed to stop governor\n", __func__);
 	}

-	down_write(&policy->rwsem);
 	cpumask_clear_cpu(cpu, policy->cpus);

 	if (policy_is_inactive(policy)) {
@@ -1298,39 +1304,27 @@ static void cpufreq_offline_prepare(unsigned int cpu)
 		/* Nominate new CPU */
 		policy->cpu = cpumask_any(policy->cpus);
 	}
-	up_write(&policy->rwsem);

 	/* Start governor again for active policy */
 	if (!policy_is_inactive(policy)) {
 		if (has_target()) {
-			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+			ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
 			if (!ret)
-				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+				ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

 			if (ret)
 				pr_err("%s: Failed to start governor\n", __func__);
 		}
-	} else if (cpufreq_driver->stop_cpu) {
-		cpufreq_driver->stop_cpu(policy);
-	}
-}
-
-static void cpufreq_offline_finish(unsigned int cpu)
-{
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-
-	if (!policy) {
-		pr_debug("%s: No cpu_data found\n", __func__);
-		return;
-	}

-	/* Only proceed for inactive policies */
-	if (!policy_is_inactive(policy))
-		return;
+		goto unlock;
+	}
+
+	if (cpufreq_driver->stop_cpu)
+		cpufreq_driver->stop_cpu(policy);

 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
-		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
 	}
@@ -1344,6 +1338,9 @@ static void cpufreq_offline_finish(unsigned int cpu)
 		cpufreq_driver->exit(policy);
 		policy->freq_table = NULL;
 	}
+
+unlock:
+	up_write(&policy->rwsem);
 }
 /**
@@ -1359,10 +1356,8 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (!policy)
 		return;

-	if (cpu_online(cpu)) {
-		cpufreq_offline_prepare(cpu);
-		cpufreq_offline_finish(cpu);
-	}
+	if (cpu_online(cpu))
+		cpufreq_offline(cpu);

 	cpumask_clear_cpu(cpu, policy->real_cpus);
 	remove_cpu_dev_symlink(policy, cpu);
@@ -1371,15 +1366,6 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	cpufreq_policy_free(policy, true);
 }

-static void handle_update(struct work_struct *work)
-{
-	struct cpufreq_policy *policy =
-		container_of(work, struct cpufreq_policy, update);
-	unsigned int cpu = policy->cpu;
-
-	pr_debug("handle_update for cpu %u called\n", cpu);
-	cpufreq_update_policy(cpu);
-}
-
 /**
  * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
  * in deep trouble.
@@ -1542,6 +1528,7 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
 void cpufreq_suspend(void)
 {
 	struct cpufreq_policy *policy;
+	int ret;

 	if (!cpufreq_driver)
 		return;
@@ -1552,7 +1539,11 @@ void cpufreq_suspend(void)
 	pr_debug("%s: Suspending Governors\n", __func__);

 	for_each_active_policy(policy) {
-		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+		down_write(&policy->rwsem);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		up_write(&policy->rwsem);
+
+		if (ret)
 			pr_err("%s: Failed to stop governor for policy: %p\n",
 				__func__, policy);
 		else if (cpufreq_driver->suspend
@@ -1574,6 +1565,7 @@ void cpufreq_suspend(void)
 void cpufreq_resume(void)
 {
 	struct cpufreq_policy *policy;
+	int ret;

 	if (!cpufreq_driver)
 		return;
@@ -1586,14 +1578,21 @@ void cpufreq_resume(void)
 	pr_debug("%s: Resuming Governors\n", __func__);

 	for_each_active_policy(policy) {
-		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
 				policy);
-		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
-		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
-			pr_err("%s: Failed to start governor for policy: %p\n",
-				__func__, policy);
+		} else {
+			down_write(&policy->rwsem);
+			ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
+			if (!ret)
+				cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+			up_write(&policy->rwsem);
+
+			if (ret)
+				pr_err("%s: Failed to start governor for policy: %p\n",
+				       __func__, policy);
+		}
 	}

 	/*
	 * schedule call cpufreq_update_policy() for first-online CPU, as that
@@ -1878,8 +1877,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
 	return NULL;
 }

-static int __cpufreq_governor(struct cpufreq_policy *policy,
-					unsigned int event)
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
 	int ret;
@@ -1913,21 +1911,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);

-	mutex_lock(&cpufreq_governor_lock);
-	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
-	    || (!policy->governor_enabled
-	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
-		mutex_unlock(&cpufreq_governor_lock);
-		return -EBUSY;
-	}
-
-	if (event == CPUFREQ_GOV_STOP)
-		policy->governor_enabled = false;
-	else if (event == CPUFREQ_GOV_START)
-		policy->governor_enabled = true;
-
-	mutex_unlock(&cpufreq_governor_lock);
-
 	ret = policy->governor->governor(policy, event);

 	if (!ret) {
@@ -1935,14 +1918,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 			policy->governor->initialized++;
 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
 			policy->governor->initialized--;
-	} else {
-		/* Restore original values */
-		mutex_lock(&cpufreq_governor_lock);
-		if (event == CPUFREQ_GOV_STOP)
-			policy->governor_enabled = true;
-		else if (event == CPUFREQ_GOV_START)
-			policy->governor_enabled = false;
-		mutex_unlock(&cpufreq_governor_lock);
 	}

 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
@@ -2097,7 +2072,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	old_gov = policy->governor;
 	/* end old governor */
 	if (old_gov) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret) {
 			/* This can happen due to race with other operations */
 			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
@@ -2105,10 +2080,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 			return ret;
 		}

-		up_write(&policy->rwsem);
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-		down_write(&policy->rwsem);
-
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		if (ret) {
 			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
 			       __func__, old_gov->name, ret);
@@ -2118,32 +2090,30 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	/* start new governor */
 	policy->governor = new_policy->governor;
-	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+	ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
 	if (!ret) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
 		if (!ret)
 			goto out;

-		up_write(&policy->rwsem);
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-		down_write(&policy->rwsem);
+		cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 	}

 	/* new governor failed, so re-start old one */
 	pr_debug("starting governor %s failed\n", policy->governor->name);
 	if (old_gov) {
 		policy->governor = old_gov;
-		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+		if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
 			policy->governor = NULL;
 		else
-			__cpufreq_governor(policy, CPUFREQ_GOV_START);
+			cpufreq_governor(policy, CPUFREQ_GOV_START);
 	}

 	return ret;

 out:
 	pr_debug("governor: change or update limits\n");
-	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 /**
@@ -2210,11 +2180,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 		break;

 	case CPU_DOWN_PREPARE:
-		cpufreq_offline_prepare(cpu);
-		break;
-
-	case CPU_POST_DEAD:
-		cpufreq_offline_finish(cpu);
+		cpufreq_offline(cpu);
 		break;

 	case CPU_DOWN_FAILED:
@@ -2247,8 +2213,11 @@ static int cpufreq_boost_set_sw(int state)
 				__func__);
 			break;
 		}
+
+		down_write(&policy->rwsem);
 		policy->user_policy.max = policy->max;
-		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+		cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+		up_write(&policy->rwsem);
 	}
 }
...
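Note on the cpufreq.c changes as a whole: the governor_enabled flag and cpufreq_governor_lock could be dropped because every call into the renamed cpufreq_governor() is now made with policy->rwsem held for writing. A minimal sketch of the resulting calling convention (illustrative only, not a function from the patch):

	/* Illustrative caller: all governor events happen under policy->rwsem. */
	static int example_restart_governor(struct cpufreq_policy *policy)
	{
		int ret;

		down_write(&policy->rwsem);
		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (!ret)
			ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		up_write(&policy->rwsem);

		return ret;
	}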
drivers/cpufreq/cpufreq_conservative.c:
@@ -14,6 +14,22 @@
 #include <linux/slab.h>
 #include "cpufreq_governor.h"

+struct cs_policy_dbs_info {
+	struct policy_dbs_info policy_dbs;
+	unsigned int down_skip;
+	unsigned int requested_freq;
+};
+
+static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
+}
+
+struct cs_dbs_tuners {
+	unsigned int down_threshold;
+	unsigned int freq_step;
+};
+
 /* Conservative governor macros */
 #define DEF_FREQUENCY_UP_THRESHOLD	(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
@@ -21,18 +37,6 @@
 #define DEF_SAMPLING_DOWN_FACTOR	(1)
 #define MAX_SAMPLING_DOWN_FACTOR	(10)

-static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
-
-static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
-				   unsigned int event);
-
-static struct cpufreq_governor cpufreq_gov_conservative = {
-	.name			= "conservative",
-	.governor		= cs_cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
 {
@@ -54,27 +58,28 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
  * Any frequency increase takes it to the maximum frequency. Frequency reduction
  * happens at minimum steps of 5% (default) of maximum frequency
  */
-static void cs_check_cpu(int cpu, unsigned int load)
+static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 {
-	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
-	struct dbs_data *dbs_data = policy->governor_data;
+	struct policy_dbs_info *policy_dbs = policy->governor_data;
+	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	unsigned int load = dbs_update(policy);

 	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
 	if (cs_tuners->freq_step == 0)
-		return;
+		goto out;

 	/* Check for frequency increase */
-	if (load > cs_tuners->up_threshold) {
+	if (load > dbs_data->up_threshold) {
 		dbs_info->down_skip = 0;

 		/* if we are already at full speed then break out early */
 		if (dbs_info->requested_freq == policy->max)
-			return;
+			goto out;

 		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
@@ -83,12 +88,12 @@ static void cs_check_cpu(int cpu, unsigned int load)
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 			CPUFREQ_RELATION_H);
-		return;
+		goto out;
 	}

 	/* if sampling_down_factor is active break out early */
-	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
-		return;
+	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
+		goto out;
+
 	dbs_info->down_skip = 0;

 	/* Check for frequency decrease */
@@ -98,7 +103,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
	 * if we cannot reduce the frequency anymore, break out early
	 */
 	if (policy->cur == policy->min)
-		return;
+		goto out;

 	freq_target = get_freq_target(cs_tuners, policy);
 	if (dbs_info->requested_freq > freq_target)
@@ -108,58 +113,25 @@ static void cs_check_cpu(int cpu, unsigned int load)
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 				CPUFREQ_RELATION_L);
-		return;
 	}
-}
-
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
-{
-	struct dbs_data *dbs_data = policy->governor_data;
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-
-	if (modify_all)
-		dbs_check_cpu(dbs_data, policy->cpu);

-	return delay_for_sampling_rate(cs_tuners->sampling_rate);
+out:
+	return dbs_data->sampling_rate;
 }
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				void *data)
-{
-	struct cpufreq_freqs *freq = data;
-	struct cs_cpu_dbs_info_s *dbs_info =
-					&per_cpu(cs_cpu_dbs_info, freq->cpu);
-	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
-
-	if (!policy)
-		return 0;
-
-	/* policy isn't governed by conservative governor */
-	if (policy->governor != &cpufreq_gov_conservative)
-		return 0;
-
-	/*
-	 * we only care if our internally tracked freq moves outside the 'valid'
-	 * ranges of frequency available to us otherwise we do not change it
-	 */
-	if (dbs_info->requested_freq > policy->max
-			|| dbs_info->requested_freq < policy->min)
-		dbs_info->requested_freq = freq->new;
-
-	return 0;
-}
+				void *data);

 static struct notifier_block cs_cpufreq_notifier_block = {
 	.notifier_call = dbs_cpufreq_notifier,
 };

 /************************** sysfs interface ************************/
-static struct common_dbs_data cs_dbs_cdata;
+static struct dbs_governor cs_dbs_gov;

 static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
 					  const char *buf, size_t count)
 {
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -167,22 +139,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;

-	cs_tuners->sampling_down_factor = input;
-	return count;
-}
-
-static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
-				   size_t count)
-{
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	unsigned int input;
-	int ret;
-	ret = sscanf(buf, "%u", &input);
-
-	if (ret != 1)
-		return -EINVAL;
-
-	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
+	dbs_data->sampling_down_factor = input;
 	return count;
 }
@@ -197,7 +154,7 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
 		return -EINVAL;

-	cs_tuners->up_threshold = input;
+	dbs_data->up_threshold = input;
 	return count;
 }

@@ -211,7 +168,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= cs_tuners->up_threshold)
+			input >= dbs_data->up_threshold)
 		return -EINVAL;

 	cs_tuners->down_threshold = input;
@@ -221,8 +178,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
 static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 				      const char *buf, size_t count)
 {
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	unsigned int input, j;
+	unsigned int input;
 	int ret;

 	ret = sscanf(buf, "%u", &input);
@@ -232,21 +188,14 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 	if (input > 1)
 		input = 1;

-	if (input == cs_tuners->ignore_nice_load) /* nothing to do */
+	if (input == dbs_data->ignore_nice_load) /* nothing to do */
 		return count;

-	cs_tuners->ignore_nice_load = input;
+	dbs_data->ignore_nice_load = input;

 	/* we need to re-evaluate prev_cpu_idle */
-	for_each_online_cpu(j) {
-		struct cs_cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
-					&dbs_info->cdbs.prev_cpu_wall, 0);
-		if (cs_tuners->ignore_nice_load)
-			dbs_info->cdbs.prev_cpu_nice =
-				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-	}
+	gov_update_cpu_data(dbs_data);
+
 	return count;
 }
@@ -272,55 +221,47 @@ static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
 	return count;
 }

-show_store_one(cs, sampling_rate);
-show_store_one(cs, sampling_down_factor);
-show_store_one(cs, up_threshold);
-show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice_load);
-show_store_one(cs, freq_step);
-declare_show_sampling_rate_min(cs);
-
-gov_sys_pol_attr_rw(sampling_rate);
-gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(up_threshold);
-gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice_load);
-gov_sys_pol_attr_rw(freq_step);
-gov_sys_pol_attr_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes_gov_sys[] = {
-	&sampling_rate_min_gov_sys.attr,
-	&sampling_rate_gov_sys.attr,
-	&sampling_down_factor_gov_sys.attr,
-	&up_threshold_gov_sys.attr,
-	&down_threshold_gov_sys.attr,
-	&ignore_nice_load_gov_sys.attr,
-	&freq_step_gov_sys.attr,
+gov_show_one_common(sampling_rate);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(up_threshold);
+gov_show_one_common(ignore_nice_load);
+gov_show_one_common(min_sampling_rate);
+gov_show_one(cs, down_threshold);
+gov_show_one(cs, freq_step);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(up_threshold);
+gov_attr_rw(ignore_nice_load);
+gov_attr_ro(min_sampling_rate);
+gov_attr_rw(down_threshold);
+gov_attr_rw(freq_step);
+
+static struct attribute *cs_attributes[] = {
+	&min_sampling_rate.attr,
+	&sampling_rate.attr,
+	&sampling_down_factor.attr,
+	&up_threshold.attr,
+	&down_threshold.attr,
+	&ignore_nice_load.attr,
+	&freq_step.attr,
 	NULL
 };

-static struct attribute_group cs_attr_group_gov_sys = {
-	.attrs = dbs_attributes_gov_sys,
-	.name = "conservative",
-};
-
-static struct attribute *dbs_attributes_gov_pol[] = {
-	&sampling_rate_min_gov_pol.attr,
-	&sampling_rate_gov_pol.attr,
-	&sampling_down_factor_gov_pol.attr,
-	&up_threshold_gov_pol.attr,
-	&down_threshold_gov_pol.attr,
-	&ignore_nice_load_gov_pol.attr,
-	&freq_step_gov_pol.attr,
-	NULL
-};
-
-static struct attribute_group cs_attr_group_gov_pol = {
-	.attrs = dbs_attributes_gov_pol,
-	.name = "conservative",
-};
-
 /************************** sysfs end ************************/

+static struct policy_dbs_info *cs_alloc(void)
+{
+	struct cs_policy_dbs_info *dbs_info;
+
+	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+	return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
+
+static void cs_free(struct policy_dbs_info *policy_dbs)
+{
+	kfree(to_dbs_info(policy_dbs));
+}
+
 static int cs_init(struct dbs_data *dbs_data, bool notify)
 {
@@ -332,11 +273,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
 		return -ENOMEM;
 	}

-	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
 	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
-	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice_load = 0;
 	tuners->freq_step = DEF_FREQUENCY_STEP;
+	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	dbs_data->ignore_nice_load = 0;

 	dbs_data->tuners = tuners;
 	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
@@ -358,35 +299,66 @@ static void cs_exit(struct dbs_data *dbs_data, bool notify)
 	kfree(dbs_data->tuners);
 }

-define_get_cpu_dbs_routines(cs_cpu_dbs_info);
+static void cs_start(struct cpufreq_policy *policy)
+{
+	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+	dbs_info->down_skip = 0;
+	dbs_info->requested_freq = policy->cur;
+}

-static struct common_dbs_data cs_dbs_cdata = {
-	.governor = GOV_CONSERVATIVE,
-	.attr_group_gov_sys = &cs_attr_group_gov_sys,
-	.attr_group_gov_pol = &cs_attr_group_gov_pol,
-	.get_cpu_cdbs = get_cpu_cdbs,
-	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+static struct dbs_governor cs_dbs_gov = {
+	.gov = {
+		.name = "conservative",
+		.governor = cpufreq_governor_dbs,
+		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
+		.owner = THIS_MODULE,
+	},
+	.kobj_type = { .default_attrs = cs_attributes },
 	.gov_dbs_timer = cs_dbs_timer,
-	.gov_check_cpu = cs_check_cpu,
+	.alloc = cs_alloc,
+	.free = cs_free,
 	.init = cs_init,
 	.exit = cs_exit,
-	.mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
+	.start = cs_start,
 };

-static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
-				   unsigned int event)
+#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_dbs_gov.gov)
+
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+				void *data)
 {
-	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
+	struct cs_policy_dbs_info *dbs_info;
+
+	if (!policy)
+		return 0;
+
+	/* policy isn't governed by conservative governor */
+	if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
+		return 0;
+
+	dbs_info = to_dbs_info(policy->governor_data);
+	/*
+	 * we only care if our internally tracked freq moves outside the 'valid'
+	 * ranges of frequency available to us otherwise we do not change it
+	 */
+	if (dbs_info->requested_freq > policy->max
+			|| dbs_info->requested_freq < policy->min)
+		dbs_info->requested_freq = freq->new;
+
+	return 0;
 }

 static int __init cpufreq_gov_dbs_init(void)
 {
-	return cpufreq_register_governor(&cpufreq_gov_conservative);
+	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
 }

 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	cpufreq_unregister_governor(&cpufreq_gov_conservative);
+	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
 }

 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
@@ -398,7 +370,7 @@ MODULE_LICENSE("GPL");
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 struct cpufreq_governor *cpufreq_default_governor(void)
 {
-	return &cpufreq_gov_conservative;
+	return CPU_FREQ_GOV_CONSERVATIVE;
 }

 fs_initcall(cpufreq_gov_dbs_init);
...
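The per-CPU cs_cpu_dbs_info is replaced above by a per-policy object that embeds struct policy_dbs_info, with cs_alloc()/cs_free() and to_dbs_info() doing a container_of() round trip. A hand-written illustration of that round trip (it assumes the definitions shown above and is not code from the patch):

	/* Illustrative only: allocate like cs_alloc(), recover like to_dbs_info(). */
	static void example_round_trip(void)
	{
		struct cs_policy_dbs_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
		struct policy_dbs_info *policy_dbs;

		if (!info)
			return;

		policy_dbs = &info->policy_dbs;	/* what cs_alloc() hands to the core */
		WARN_ON(to_dbs_info(policy_dbs) != info);	/* what cs_free() frees */
		kfree(info);
	}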
drivers/cpufreq/cpufreq_governor.c: this diff is collapsed.
drivers/cpufreq/cpufreq_governor.h:
@@ -18,6 +18,7 @@
 #define _CPUFREQ_GOVERNOR_H

 #include <linux/atomic.h>
+#include <linux/irq_work.h>
 #include <linux/cpufreq.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
@@ -41,96 +42,68 @@
 enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

 /*
- * Macro for creating governors sysfs routines
- *
- * - gov_sys: One governor instance per whole system
- * - gov_pol: One governor instance per policy
+ * Abbreviations:
+ * dbs: used as a shortform for demand based switching It helps to keep variable
+ *	names smaller, simpler
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
  */

-/* Create attributes */
-#define gov_sys_attr_ro(_name)						\
-static struct global_attr _name##_gov_sys =				\
-__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
-
-#define gov_sys_attr_rw(_name)						\
-static struct global_attr _name##_gov_sys =				\
-__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
-
-#define gov_pol_attr_ro(_name)						\
-static struct freq_attr _name##_gov_pol =				\
-__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
-
-#define gov_pol_attr_rw(_name)						\
-static struct freq_attr _name##_gov_pol =				\
-__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
-
-#define gov_sys_pol_attr_rw(_name)					\
-	gov_sys_attr_rw(_name);						\
-	gov_pol_attr_rw(_name)
-
-#define gov_sys_pol_attr_ro(_name)					\
-	gov_sys_attr_ro(_name);						\
-	gov_pol_attr_ro(_name)
+/* Governor demand based switching data (per-policy or global). */
+struct dbs_data {
+	int usage_count;
+	void *tuners;
+	unsigned int min_sampling_rate;
+	unsigned int ignore_nice_load;
+	unsigned int sampling_rate;
+	unsigned int sampling_down_factor;
+	unsigned int up_threshold;
+	unsigned int io_is_busy;
+
+	struct kobject kobj;
+	struct list_head policy_dbs_list;
+	/*
+	 * Protect concurrent updates to governor tunables from sysfs,
+	 * policy_dbs_list and usage_count.
+	 */
+	struct mutex mutex;
+};
+
+/* Governor's specific attributes */
+struct dbs_data;
+struct governor_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
+	ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
+			 size_t count);
+};

-/* Create show/store routines */
-#define show_one(_gov, file_name)					\
-static ssize_t show_##file_name##_gov_sys				\
-(struct kobject *kobj, struct attribute *attr, char *buf)		\
-{									\
-	struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
-	return sprintf(buf, "%u\n", tuners->file_name);			\
-}									\
-									\
-static ssize_t show_##file_name##_gov_pol				\
-(struct cpufreq_policy *policy, char *buf)				\
+#define gov_show_one(_gov, file_name)					\
+static ssize_t show_##file_name						\
+(struct dbs_data *dbs_data, char *buf)					\
 {									\
-	struct dbs_data *dbs_data = policy->governor_data;		\
 	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
 	return sprintf(buf, "%u\n", tuners->file_name);			\
 }

-#define store_one(_gov, file_name)					\
-static ssize_t store_##file_name##_gov_sys				\
-(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
-{									\
-	struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data;	\
-	return store_##file_name(dbs_data, buf, count);			\
-}									\
-									\
-static ssize_t store_##file_name##_gov_pol				\
-(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+#define gov_show_one_common(file_name)					\
+static ssize_t show_##file_name						\
+(struct dbs_data *dbs_data, char *buf)					\
 {									\
-	struct dbs_data *dbs_data = policy->governor_data;		\
-	return store_##file_name(dbs_data, buf, count);			\
+	return sprintf(buf, "%u\n", dbs_data->file_name);		\
 }

-#define show_store_one(_gov, file_name)					\
-show_one(_gov, file_name);						\
-store_one(_gov, file_name)
+#define gov_attr_ro(_name)						\
+static struct governor_attr _name =					\
+__ATTR(_name, 0444, show_##_name, NULL)

-/* create helper routines */
-#define define_get_cpu_dbs_routines(_dbs_info)				\
-static struct cpu_dbs_info *get_cpu_cdbs(int cpu)			\
-{									\
-	return &per_cpu(_dbs_info, cpu).cdbs;				\
-}									\
-									\
-static void *get_cpu_dbs_info_s(int cpu)				\
-{									\
-	return &per_cpu(_dbs_info, cpu);				\
-}
-
-/*
- * Abbreviations:
- * dbs: used as a shortform for demand based switching It helps to keep variable
- *	names smaller, simpler
- * cdbs: common dbs
- * od_*: On-demand governor
- * cs_*: Conservative governor
- */
+#define gov_attr_rw(_name)						\
+static struct governor_attr _name =					\
+__ATTR(_name, 0644, show_##_name, store_##_name)

 /* Common to all CPUs of a policy */
-struct cpu_common_dbs_info {
+struct policy_dbs_info {
 	struct cpufreq_policy *policy;
 	/*
	 * Per policy mutex that serializes load evaluation from limit-change
@@ -138,11 +111,27 @@ struct cpu_common_dbs_info {
	 */
 	struct mutex timer_mutex;

-	ktime_t time_stamp;
-	atomic_t skip_work;
+	u64 last_sample_time;
+	s64 sample_delay_ns;
+	atomic_t work_count;
+	struct irq_work irq_work;
 	struct work_struct work;
+	/* dbs_data may be shared between multiple policy objects */
+	struct dbs_data *dbs_data;
+	struct list_head list;
+	/* Multiplier for increasing sample delay temporarily. */
+	unsigned int rate_mult;
+	/* Status indicators */
+	bool is_shared;		/* This object is used by multiple CPUs */
+	bool work_in_progress;	/* Work is being queued up or in progress */
 };

+static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
+					   unsigned int delay_us)
+{
+	policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
+}
+
 /* Per cpu structures */
 struct cpu_dbs_info {
 	u64 prev_cpu_idle;
@@ -155,54 +144,14 @@ struct cpu_dbs_info {
	 * wake-up from idle.
	 */
 	unsigned int prev_load;
-	struct timer_list timer;
-	struct cpu_common_dbs_info *shared;
+	struct update_util_data update_util;
+	struct policy_dbs_info *policy_dbs;
 };

-struct od_cpu_dbs_info_s {
-	struct cpu_dbs_info cdbs;
-	struct cpufreq_frequency_table *freq_table;
-	unsigned int freq_lo;
-	unsigned int freq_lo_jiffies;
-	unsigned int freq_hi_jiffies;
-	unsigned int rate_mult;
-	unsigned int sample_type:1;
-};
-
-struct cs_cpu_dbs_info_s {
-	struct cpu_dbs_info cdbs;
-	unsigned int down_skip;
-	unsigned int requested_freq;
-};
-
-/* Per policy Governors sysfs tunables */
-struct od_dbs_tuners {
-	unsigned int ignore_nice_load;
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
-	unsigned int powersave_bias;
-	unsigned int io_is_busy;
-};
-
-struct cs_dbs_tuners {
-	unsigned int ignore_nice_load;
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
-	unsigned int down_threshold;
-	unsigned int freq_step;
-};
-
 /* Common Governor data across policies */
-struct dbs_data;
-struct common_dbs_data {
-	/* Common across governors */
-#define GOV_ONDEMAND		0
-#define GOV_CONSERVATIVE	1
-	int governor;
-
-	struct attribute_group *attr_group_gov_sys; /* one governor - system */
-	struct attribute_group *attr_group_gov_pol; /* one governor - policy */
+struct dbs_governor {
+	struct cpufreq_governor gov;
+	struct kobj_type kobj_type;

 	/*
	 * Common data for platforms that don't set
@@ -210,74 +159,32 @@ struct common_dbs_data {
	 */
 	struct dbs_data *gdbs_data;

-	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
-	void *(*get_cpu_dbs_info_s)(int cpu);
-	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy,
-				      bool modify_all);
-	void (*gov_check_cpu)(int cpu, unsigned int load);
+	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
+	struct policy_dbs_info *(*alloc)(void);
+	void (*free)(struct policy_dbs_info *policy_dbs);
 	int (*init)(struct dbs_data *dbs_data, bool notify);
 	void (*exit)(struct dbs_data *dbs_data, bool notify);
-
-	/* Governor specific ops, see below */
-	void *gov_ops;
-
-	/*
-	 * Protects governor's data (struct dbs_data and struct common_dbs_data)
-	 */
-	struct mutex mutex;
+	void (*start)(struct cpufreq_policy *policy);
 };

-/* Governor Per policy data */
-struct dbs_data {
-	struct common_dbs_data *cdata;
-	unsigned int min_sampling_rate;
-	int usage_count;
-	void *tuners;
-};
+static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
+{
+	return container_of(policy->governor, struct dbs_governor, gov);
+}

-/* Governor specific ops, will be passed to dbs_data->gov_ops */
+/* Governor specific operations */
 struct od_ops {
-	void (*powersave_bias_init_cpu)(int cpu);
 	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
 			unsigned int freq_next, unsigned int relation);
-	void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
 };

-static inline int delay_for_sampling_rate(unsigned int sampling_rate)
-{
-	int delay = usecs_to_jiffies(sampling_rate);
-
-	/* We want all CPUs to do sampling nearly on same jiffy */
-	if (num_online_cpus() > 1)
-		delay -= jiffies % delay;
-
-	return delay;
-}
-
-#define declare_show_sampling_rate_min(_gov)				\
-static ssize_t show_sampling_rate_min_gov_sys				\
-(struct kobject *kobj, struct attribute *attr, char *buf)		\
-{									\
-	struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data;	\
-	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);	\
-}									\
-									\
-static ssize_t show_sampling_rate_min_gov_pol				\
-(struct cpufreq_policy *policy, char *buf)				\
-{									\
-	struct dbs_data *dbs_data = policy->governor_data;		\
-	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);	\
-}
-
-extern struct mutex cpufreq_governor_lock;
-
-void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay);
-void gov_cancel_work(struct cpu_common_dbs_info *shared);
-void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-		struct common_dbs_data *cdata, unsigned int event);
+unsigned int dbs_update(struct cpufreq_policy *policy);
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
 void od_register_powersave_bias_handler(unsigned int (*f)
 		(struct cpufreq_policy *, unsigned int, unsigned int),
 		unsigned int powersave_bias);
 void od_unregister_powersave_bias_handler(void);
+ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+			    size_t count);
+void gov_update_cpu_data(struct dbs_data *dbs_data);

 #endif /* _CPUFREQ_GOVERNOR_H */
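To make the new sysfs helpers concrete, here is a hand-expanded sketch of what gov_show_one_common() and gov_attr_rw() generate for the common up_threshold tunable (expansion written out by hand, assuming a store_up_threshold() such as the conservative governor's; treat it as illustrative):

	static ssize_t show_up_threshold(struct dbs_data *dbs_data, char *buf)
	{
		return sprintf(buf, "%u\n", dbs_data->up_threshold);
	}

	static struct governor_attr up_threshold =
		__ATTR(up_threshold, 0644, show_up_threshold, store_up_threshold);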
drivers/cpufreq/cpufreq_ondemand.c: this diff is collapsed.
drivers/cpufreq/cpufreq_ondemand.h (new file):
+/*
+ * Header file for CPUFreq ondemand governor and related code.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+struct od_policy_dbs_info {
+	struct policy_dbs_info policy_dbs;
+	struct cpufreq_frequency_table *freq_table;
+	unsigned int freq_lo;
+	unsigned int freq_lo_delay_us;
+	unsigned int freq_hi_delay_us;
+	unsigned int sample_type:1;
+};
+
+static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+	return container_of(policy_dbs, struct od_policy_dbs_info, policy_dbs);
+}
+
+struct od_dbs_tuners {
+	unsigned int powersave_bias;
+};
drivers/cpufreq/intel_pstate.c:
@@ -71,7 +71,7 @@ struct sample {
 	u64 mperf;
 	u64 tsc;
 	int freq;
-	ktime_t time;
+	u64 time;
 };

 struct pstate_data {
@@ -103,13 +103,13 @@ struct _pid {
 struct cpudata {
 	int cpu;

-	struct timer_list timer;
+	struct update_util_data update_util;

 	struct pstate_data pstate;
 	struct vid_data vid;
 	struct _pid pid;

-	ktime_t last_sample_time;
+	u64	last_sample_time;
 	u64	prev_aperf;
 	u64	prev_mperf;
 	u64	prev_tsc;
@@ -120,6 +120,7 @@ struct cpudata {
 static struct cpudata **all_cpu_data;
 struct pstate_adjust_policy {
 	int sample_rate_ms;
+	s64 sample_rate_ns;
 	int deadband;
 	int setpoint;
 	int p_gain_pct;
@@ -718,7 +719,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;

-	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+	wrmsrl(MSR_IA32_PERF_CTL, val);
 }

 static int knl_get_turbo_pstate(void)
@@ -889,7 +890,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 	sample->core_pct_busy = (int32_t)core_pct;
 }

-static inline void intel_pstate_sample(struct cpudata *cpu)
+static inline void intel_pstate_sample(struct cpudata *cpu, u64 time)
 {
 	u64 aperf, mperf;
 	unsigned long flags;
@@ -906,7 +907,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	local_irq_restore(flags);

 	cpu->last_sample_time = cpu->sample.time;
-	cpu->sample.time = ktime_get();
+	cpu->sample.time = time;
 	cpu->sample.aperf = aperf;
 	cpu->sample.mperf = mperf;
 	cpu->sample.tsc = tsc;
@@ -921,22 +922,6 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	cpu->prev_tsc = tsc;
 }

-static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
-{
-	int delay;
-
-	delay = msecs_to_jiffies(50);
-	mod_timer_pinned(&cpu->timer, jiffies + delay);
-}
-
-static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
-{
-	int delay;
-
-	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
-	mod_timer_pinned(&cpu->timer, jiffies + delay);
-}
-
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
@@ -976,8 +961,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 {
 	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
-	s64 duration_us;
-	u32 sample_time;
+	u64 duration_ns;

 	/*
	 * core_busy is the ratio of actual performance to max
@@ -996,18 +980,16 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

 	/*
-	 * Since we have a deferred timer, it will not fire unless
-	 * we are in C0. So, determine if the actual elapsed time
-	 * is significantly greater (3x) than our sample interval. If it
-	 * is, then we were idle for a long enough period of time
-	 * to adjust our busyness.
+	 * Since our utilization update callback will not run unless we are
+	 * in C0, check if the actual elapsed time is significantly greater (3x)
+	 * than our sample interval. If it is, then we were idle for a long
+	 * enough period of time to adjust our busyness.
 	 */
-	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
-	duration_us = ktime_us_delta(cpu->sample.time,
-				     cpu->last_sample_time);
-	if (duration_us > sample_time * 3) {
-		sample_ratio = div_fp(int_tofp(sample_time),
-				      int_tofp(duration_us));
+	duration_ns = cpu->sample.time - cpu->last_sample_time;
+	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
+	    && cpu->last_sample_time > 0) {
+		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
+				      int_tofp(duration_ns));
 		core_busy = mul_fp(core_busy, sample_ratio);
 	}
@@ -1037,23 +1019,17 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 		sample->freq);
 }

-static void intel_hwp_timer_func(unsigned long __data)
-{
-	struct cpudata *cpu = (struct cpudata *) __data;
-
-	intel_pstate_sample(cpu);
-	intel_hwp_set_sample_time(cpu);
-}
-
-static void intel_pstate_timer_func(unsigned long __data)
+static void intel_pstate_update_util(struct update_util_data *data, u64 time,
+				     unsigned long util, unsigned long max)
 {
-	struct cpudata *cpu = (struct cpudata *) __data;
-
-	intel_pstate_sample(cpu);
+	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+	u64 delta_ns = time - cpu->sample.time;

-	intel_pstate_adjust_busy_pstate(cpu);
-
-	intel_pstate_set_sample_time(cpu);
+	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
+		intel_pstate_sample(cpu, time);
+		if (!hwp_active)
+			intel_pstate_adjust_busy_pstate(cpu);
+	}
 }

 #define ICPU(model, policy) \
@@ -1101,24 +1077,19 @@ static int intel_pstate_init_cpu(unsigned int cpunum)

 	cpu->cpu = cpunum;

-	if (hwp_active)
+	if (hwp_active) {
 		intel_pstate_hwp_enable(cpu);
+		pid_params.sample_rate_ms = 50;
+		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
+	}

 	intel_pstate_get_cpu_pstates(cpu);

-	init_timer_deferrable(&cpu->timer);
-	cpu->timer.data = (unsigned long)cpu;
-	cpu->timer.expires = jiffies + HZ/100;
-
-	if (!hwp_active)
-		cpu->timer.function = intel_pstate_timer_func;
-	else
-		cpu->timer.function = intel_hwp_timer_func;
-
 	intel_pstate_busy_pid_reset(cpu);
-	intel_pstate_sample(cpu);
+	intel_pstate_sample(cpu, 0);

-	add_timer_on(&cpu->timer, cpunum);
+	cpu->update_util.func = intel_pstate_update_util;
+	cpufreq_set_update_util_data(cpunum, &cpu->update_util);

 	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
@@ -1202,7 +1173,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)

 	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

-	del_timer_sync(&all_cpu_data[cpu_num]->timer);
+	cpufreq_set_update_util_data(cpu_num, NULL);
+	synchronize_sched();
+
 	if (hwp_active)
 		return;

@@ -1266,6 +1239,7 @@ static int intel_pstate_msrs_not_valid(void)
 static void copy_pid_params(struct pstate_adjust_policy *policy)
 {
 	pid_params.sample_rate_ms = policy->sample_rate_ms;
+	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
 	pid_params.p_gain_pct = policy->p_gain_pct;
 	pid_params.i_gain_pct = policy->i_gain_pct;
 	pid_params.d_gain_pct = policy->d_gain_pct;
@@ -1467,7 +1441,8 @@ static int __init intel_pstate_init(void)
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu]) {
-			del_timer_sync(&all_cpu_data[cpu]->timer);
+			cpufreq_set_update_util_data(cpu, NULL);
+			synchronize_sched();
 			kfree(all_cpu_data[cpu]);
 		}
 	}
...
include/linux/cpufreq.h:
@@ -80,7 +80,6 @@ struct cpufreq_policy {
 	unsigned int		last_policy;	/* policy before unplug */
 	struct cpufreq_governor	*governor;	/* see below */
 	void			*governor_data;
-	bool			governor_enabled; /* governor start/stop flag */
 	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

 	struct work_struct	update;		/* if update_policy() needs to be
@@ -100,10 +99,6 @@ struct cpufreq_policy {
  * - Any routine that will write to the policy structure and/or may take away
  *   the policy altogether (eg. CPU hotplug), will hold this lock in write
  *   mode before doing so.
- *
- * Additional rules:
- * - Lock should not be held across
- *     __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
  */
 struct rw_semaphore	rwsem;
...
include/linux/sched.h:
@@ -3207,4 +3207,13 @@ static inline unsigned long rlimit_max(unsigned int limit)
 	return task_rlimit_max(current, limit);
 }

+#ifdef CONFIG_CPU_FREQ
+struct update_util_data {
+	void (*func)(struct update_util_data *data,
+		     u64 time, unsigned long util, unsigned long max);
+};
+
+void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+#endif /* CONFIG_CPU_FREQ */
+
 #endif
kernel/sched/Makefile:
@@ -19,3 +19,4 @@ obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
kernel/sched/cpufreq.c (new file):
+/*
+ * Scheduler code and data structures related to cpufreq.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "sched.h"
+
+DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+/**
+ * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
+ * @cpu: The CPU to set the pointer for.
+ * @data: New pointer value.
+ *
+ * Set and publish the update_util_data pointer for the given CPU. That pointer
+ * points to a struct update_util_data object containing a callback function
+ * to call from cpufreq_update_util(). That function will be called from an RCU
+ * read-side critical section, so it must not sleep.
+ *
+ * Callers must use RCU-sched callbacks to free any memory that might be
+ * accessed via the old update_util_data pointer or invoke synchronize_sched()
+ * right after this function to avoid use-after-free.
+ */
+void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
+{
+	if (WARN_ON(data && !data->func))
+		return;
+
+	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
+}
+EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
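A minimal consumer sketch following the rule stated in the kernel-doc above (register a callback, then clear the pointer and synchronize_sched() before freeing); it mirrors what intel_pstate does in this same merge, but my_update_util and my_data are made-up names:

	static DEFINE_PER_CPU(struct update_util_data, my_data);

	static void my_update_util(struct update_util_data *data, u64 time,
				   unsigned long util, unsigned long max)
	{
		/* Runs in an RCU-sched read-side section: must not sleep. */
	}

	static void my_register(int cpu)
	{
		per_cpu(my_data, cpu).func = my_update_util;
		cpufreq_set_update_util_data(cpu, &per_cpu(my_data, cpu));
	}

	static void my_unregister(int cpu)
	{
		cpufreq_set_update_util_data(cpu, NULL);
		synchronize_sched();	/* wait for in-flight callbacks */
	}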
kernel/sched/deadline.c:
@@ -726,6 +726,10 @@ static void update_curr_dl(struct rq *rq)
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;

+	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_trigger_update(rq_clock(rq));
+
 	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
...
kernel/sched/fair.c:
@@ -2824,7 +2824,8 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
-	int cpu = cpu_of(rq_of(cfs_rq));
+	struct rq *rq = rq_of(cfs_rq);
+	int cpu = cpu_of(rq);

 	/*
	 * Track task load average for carrying it to new CPU after migrated, and
@@ -2836,6 +2837,29 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
 		update_tg_load_avg(cfs_rq, 0);
+
+	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
+		unsigned long max = rq->cpu_capacity_orig;
+
+		/*
+		 * There are a few boundary cases this might miss but it should
+		 * get called often enough that that should (hopefully) not be
+		 * a real problem -- added to that it only calls on the local
+		 * CPU, so if we enqueue remotely we'll miss an update, but
+		 * the next tick/schedule should update.
+		 *
+		 * It will not get called when we go idle, because the idle
+		 * thread is a different class (!fair), nor will the utilization
+		 * number include things like RT tasks.
+		 *
+		 * As is, the util number is not freq-invariant (we'd have to
+		 * implement arch_scale_freq_capacity() for that).
+		 *
+		 * See cpu_util().
+		 */
+		cpufreq_update_util(rq_clock(rq),
+				    min(cfs_rq->avg.util_avg, max), max);
+	}
 }

 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
...
kernel/sched/rt.c:
@@ -945,6 +945,10 @@ static void update_curr_rt(struct rq *rq)
 	if (curr->sched_class != &rt_sched_class)
 		return;

+	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_trigger_update(rq_clock(rq));
+
 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 	if (unlikely((s64)delta_exec <= 0))
 		return;
...
kernel/sched/sched.h:
@@ -1738,3 +1738,51 @@ static inline u64 irq_time_read(int cpu)
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_CPU_FREQ
+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+/**
+ * cpufreq_update_util - Take a note about CPU utilization changes.
+ * @time: Current time.
+ * @util: Current utilization.
+ * @max: Utilization ceiling.
+ *
+ * This function is called by the scheduler on every invocation of
+ * update_load_avg() on the CPU whose utilization is being updated.
+ *
+ * It can only be called from RCU-sched read-side critical sections.
+ */
+static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
+{
+	struct update_util_data *data;
+
+	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+	if (data)
+		data->func(data, time, util, max);
+}
+
+/**
+ * cpufreq_trigger_update - Trigger CPU performance state evaluation if needed.
+ * @time: Current time.
+ *
+ * The way cpufreq is currently arranged requires it to evaluate the CPU
+ * performance state (frequency/voltage) on a regular basis to prevent it from
+ * being stuck in a completely inadequate performance level for too long.
+ * That is not guaranteed to happen if the updates are only triggered from CFS,
+ * though, because they may not be coming in if RT or deadline tasks are active
+ * all the time (or there are RT and DL tasks only).
+ *
+ * As a workaround for that issue, this function is called by the RT and DL
+ * sched classes to trigger extra cpufreq updates to prevent it from stalling,
+ * but that really is a band-aid. Going forward it should be replaced with
+ * solutions targeted more specifically at RT and DL tasks.
+ */
+static inline void cpufreq_trigger_update(u64 time)
+{
+	cpufreq_update_util(time, ULONG_MAX, 0);
+}
+#else
+static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max) {}
+static inline void cpufreq_trigger_update(u64 time) {}
+#endif /* CONFIG_CPU_FREQ */