Commit 6bd51658 authored by Rafael J. Wysocki

Merge branch 'pm-fixes' into fixes

* pm-fixes:
  cpufreq / intel_pstate: Do not load on VM that does not report max P state.
  cpufreq / intel_pstate: Fix intel_pstate_init() error path
  PM / QoS: Remove device PM QoS sysfs attributes at the right place
  PM / QoS: Fix concurrency issues and memory leaks in device PM QoS
  cpufreq: highbank: do not initialize array with a loop
  PM / OPP: improve introductory documentation
  cpufreq: Fix a typo in comment
  mailbox, pl320-ipc: remove __init from probe function
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================
(C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
......@@ -16,15 +15,31 @@ Contents
1. Introduction
===============
+1.1 What is an Operating Performance Point (OPP)?
Complex SoCs of today consists of a multiple sub-modules working in conjunction.
In an operational system executing varied use cases, not all modules in the SoC
need to function at their highest performing frequency all the time. To
facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+voltage/frequency pairs that are higher.
+The set of discrete tuples consisting of frequency and voltage pairs that
the device will support per domain are called Operating Performance Points or
OPPs.
+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
+1.2 Operating Performance Points Library
OPP library provides a set of helper functions to organize and query the OPP
information. The library is located in drivers/base/power/opp.c and the header
is located in include/linux/opp.h. OPP library can be enabled by enabling
......
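Not part of the commit: to make the documented flow concrete, here is a minimal usage sketch registering and querying the three example OPPs with the opp_add(), opp_find_freq_ceil() and opp_get_voltage() helpers from include/linux/opp.h as they existed in this kernel era. The function and device names (my_soc_init_opps, my_soc_volt_for, mpu_dev) are hypothetical; the rcu_read_lock() around the find helper follows the library's documented requirement.

```c
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

/* Hypothetical SoC hook: register the three example OPPs for an MPU
 * device. opp_add() takes the frequency in Hz and the voltage in uV. */
static int my_soc_init_opps(struct device *mpu_dev)
{
	int ret;

	ret = opp_add(mpu_dev, 300000000, 1000000);	/* 300 MHz @ 1.0 V */
	if (!ret)
		ret = opp_add(mpu_dev, 800000000, 1200000);	/* 800 MHz @ 1.2 V */
	if (!ret)
		ret = opp_add(mpu_dev, 1000000000, 1300000);	/* 1 GHz @ 1.3 V */
	return ret;
}

/* Find the lowest OPP at or above the requested rate and return its
 * voltage; the find helpers must be called under rcu_read_lock(). */
static unsigned long my_soc_volt_for(struct device *mpu_dev, unsigned long freq)
{
	struct opp *opp;
	unsigned long u_volt = 0;

	rcu_read_lock();
	opp = opp_find_freq_ceil(mpu_dev, &freq);
	if (!IS_ERR(opp))
		u_volt = opp_get_voltage(opp);
	rcu_read_unlock();

	return u_volt;
}
```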
......@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
-dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}
......@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
-dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
......
......@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
-dev->power.power_state = PMSG_INVALID;
+dev->power.qos = NULL;
dev->power.early_init = true;
}
}
......@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_sleep_init(struct device *dev) {}
-static inline void device_pm_add(struct device *dev)
-{
-dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_remove(struct device *dev)
{
-dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}
......
......@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <linux/err.h>
#include "power.h"
......@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf;
s32 val;
-if (!qos)
+if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;
pqf = &qos->flags;
......@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
-return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+return IS_ERR_OR_NULL(dev->power.qos) ?
+0 : pm_qos_read_value(&dev->power.qos->latency);
}
/**
......@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
return 0;
}
-/**
-* dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
-* @dev: target device
-*
-* Called from the device PM subsystem during device insertion under
-* device_pm_lock().
-*/
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-mutex_lock(&dev_pm_qos_mtx);
-dev->power.qos = NULL;
-dev->power.power_state = PMSG_ON;
-mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
/**
* dev_pm_qos_constraints_destroy
......@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
+mutex_lock(&dev_pm_qos_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
-dev_pm_qos_hide_latency_limit(dev);
-dev_pm_qos_hide_flags(dev);
+__dev_pm_qos_hide_latency_limit(dev);
+__dev_pm_qos_hide_flags(dev);
-mutex_lock(&dev_pm_qos_mtx);
-dev->power.power_state = PMSG_INVALID;
qos = dev->power.qos;
if (!qos)
goto out;
......@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
}
spin_lock_irq(&dev->power.lock);
-dev->power.qos = NULL;
+dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
......@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__))
return -EINVAL;
-req->dev = dev;
mutex_lock(&dev_pm_qos_mtx);
-if (!dev->power.qos) {
-if (dev->power.power_state.event == PM_EVENT_INVALID) {
-/* The device has been removed from the system. */
-req->dev = NULL;
-ret = -ENODEV;
-goto out;
-} else {
-/*
-* Allocate the constraints data on the first call to
-* add_request, i.e. only if the data is not already
-* allocated and if the device has not been removed.
-*/
-ret = dev_pm_qos_constraints_allocate(dev);
-}
-}
+if (IS_ERR(dev->power.qos))
+ret = -ENODEV;
+else if (!dev->power.qos)
+ret = dev_pm_qos_constraints_allocate(dev);
if (!ret) {
+req->dev = dev;
req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
}
-out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
......@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 curr_value;
int ret = 0;
-if (!req->dev->power.qos)
+if (!req) /*guard against callers passing in null */
+return -EINVAL;
+if (WARN(!dev_pm_qos_request_active(req),
+"%s() called for unknown object\n", __func__))
+return -EINVAL;
+if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
switch(req->type) {
......@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;
+mutex_lock(&dev_pm_qos_mtx);
+ret = __dev_pm_qos_update_request(req, new_value);
+mutex_unlock(&dev_pm_qos_mtx);
+return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+int ret;
if (!req) /*guard against callers passing in null */
return -EINVAL;
......@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
"%s() called for unknown object\n", __func__))
return -EINVAL;
-mutex_lock(&dev_pm_qos_mtx);
-ret = __dev_pm_qos_update_request(req, new_value);
-mutex_unlock(&dev_pm_qos_mtx);
+if (IS_ERR_OR_NULL(req->dev->power.qos))
+return -ENODEV;
+ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+memset(req, 0, sizeof(*req));
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
/**
* dev_pm_qos_remove_request - modifies an existing qos request
......@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
-int ret = 0;
-if (!req) /*guard against callers passing in null */
-return -EINVAL;
-if (WARN(!dev_pm_qos_request_active(req),
-"%s() called for unknown object\n", __func__))
-return -EINVAL;
+int ret;
mutex_lock(&dev_pm_qos_mtx);
-if (req->dev->power.qos) {
-ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-PM_QOS_DEFAULT_VALUE);
-memset(req, 0, sizeof(*req));
-} else {
-/* Return if the device has been removed */
-ret = -ENODEV;
-}
+ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
......@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx);
-if (!dev->power.qos)
-ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+if (IS_ERR(dev->power.qos))
+ret = -ENODEV;
+else if (!dev->power.qos)
+ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
ret = blocking_notifier_chain_register(
......@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
-if (dev->power.qos)
+if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(
dev->power.qos->latency.notifiers,
notifier);
......@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
+struct dev_pm_qos_request *req = NULL;
switch(type) {
case DEV_PM_QOS_LATENCY:
-dev_pm_qos_remove_request(dev->power.qos->latency_req);
+req = dev->power.qos->latency_req;
dev->power.qos->latency_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
-dev_pm_qos_remove_request(dev->power.qos->flags_req);
+req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
}
+__dev_pm_qos_remove_request(req);
+kfree(req);
}
/**
......@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
-if (dev->power.qos && dev->power.qos->latency_req)
-return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-if (ret < 0)
+if (ret < 0) {
+kfree(req);
return ret;
+}
+mutex_lock(&dev_pm_qos_mtx);
+if (IS_ERR_OR_NULL(dev->power.qos))
+ret = -ENODEV;
+else if (dev->power.qos->latency_req)
+ret = -EEXIST;
+if (ret < 0) {
+__dev_pm_qos_remove_request(req);
+kfree(req);
+goto out;
+}
dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+out:
+mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+pm_qos_sysfs_remove_latency(dev);
+__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+}
+}
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
-if (dev->power.qos && dev->power.qos->latency_req) {
-pm_qos_sysfs_remove_latency(dev);
-__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-}
+mutex_lock(&dev_pm_qos_mtx);
+__dev_pm_qos_hide_latency_limit(dev);
+mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
......@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (!device_is_registered(dev))
return -EINVAL;
-if (dev->power.qos && dev->power.qos->flags_req)
-return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
-pm_runtime_get_sync(dev);
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-if (ret < 0)
-goto fail;
+if (ret < 0) {
+kfree(req);
+return ret;
+}
+pm_runtime_get_sync(dev);
+mutex_lock(&dev_pm_qos_mtx);
+if (IS_ERR_OR_NULL(dev->power.qos))
+ret = -ENODEV;
+else if (dev->power.qos->flags_req)
+ret = -EEXIST;
+if (ret < 0) {
+__dev_pm_qos_remove_request(req);
+kfree(req);
+goto out;
+}
dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-fail:
+out:
+mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+pm_qos_sysfs_remove_flags(dev);
+__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+}
+}
/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
-if (dev->power.qos && dev->power.qos->flags_req) {
-pm_qos_sysfs_remove_flags(dev);
-pm_runtime_get_sync(dev);
-__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-pm_runtime_put(dev);
-}
+pm_runtime_get_sync(dev);
+mutex_lock(&dev_pm_qos_mtx);
+__dev_pm_qos_hide_flags(dev);
+mutex_unlock(&dev_pm_qos_mtx);
+pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
......@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
s32 value;
int ret;
-if (!dev->power.qos || !dev->power.qos->flags_req)
-return -EINVAL;
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
+if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+ret = -EINVAL;
+goto out;
+}
value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;
......@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */
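The common thread in the qos.c hunks above is that dev->power.qos changes from a two-state pointer (NULL or valid) into a three-state one, which is why the readers switch from !qos to IS_ERR_OR_NULL(qos) and the writers check IS_ERR() before allocating. A minimal sketch of the convention; the helper name and return values are illustrative, while the macros are the kernel's own from <linux/err.h>:

```c
#include <linux/err.h>
#include <linux/pm_qos.h>

/*
 * dev->power.qos lifecycle after these patches:
 *   NULL             - set in device_pm_init_common(); constraints not
 *                      allocated yet, allocate on the first request
 *   ERR_PTR(-ENODEV) - set by dev_pm_qos_constraints_destroy(); the
 *                      device is going away, never allocate again
 *   valid pointer    - constraints in active use
 *
 * Illustrative helper showing how the patched call sites branch:
 */
static int example_qos_state(struct dev_pm_qos *qos)
{
	if (IS_ERR(qos))	/* being removed: refuse new requests */
		return -ENODEV;
	if (!qos)		/* first user: allocate the constraints */
		return 1;
	return 0;		/* live: apply the request directly */
}
```

The ERR_PTR sentinel closes the window where a request could arrive between constraint teardown and device removal; previously that window was tracked through dev->power.power_state (PMSG_ON/PMSG_INVALID), which these patches drop entirely.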
......@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
......
......@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
* dbs: used as a shortform for demand based switching It helps to keep variable
* names smaller, simpler
* cdbs: common dbs
-* on_*: On-demand governor
+* od_*: On-demand governor
* cs_*: Conservative governor
*/
......
......@@ -28,13 +28,7 @@
static int hb_voltage_change(unsigned int freq)
{
-int i;
-u32 msg[HB_CPUFREQ_IPC_LEN];
-msg[0] = HB_CPUFREQ_CHANGE_NOTE;
-msg[1] = freq / 1000000;
-for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
-msg[i] = 0;
+u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
return pl320_ipc_transmit(msg);
}
......
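The highbank change leans on C's initializer rules: when an array initializer lists fewer elements than the array holds, the remainder are implicitly zero-initialized (C99 6.7.8p21), so the explicit zeroing loop was redundant. A standalone sketch, with made-up values for the two macros since their real definitions are not part of this hunk:

```c
#include <stdio.h>

#define HB_CPUFREQ_CHANGE_NOTE	0x80000001u	/* hypothetical value */
#define HB_CPUFREQ_IPC_LEN	7		/* hypothetical length */

int main(void)
{
	/* Only the first two elements get explicit initializers; the
	 * remaining five are implicitly zeroed, exactly what the old
	 * "for (i = 2; ...) msg[i] = 0;" loop did by hand. */
	unsigned int msg[HB_CPUFREQ_IPC_LEN] = { HB_CPUFREQ_CHANGE_NOTE, 800 };

	for (int i = 0; i < HB_CPUFREQ_IPC_LEN; i++)
		printf("msg[%d] = 0x%x\n", i, msg[i]);
	return 0;
}
```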
......@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
+if (!policy->cpuinfo.max_freq)
+return -ENODEV;
intel_pstate_get_min_max(cpu, &min, &max);
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
......@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
.owner = THIS_MODULE,
};
-static void intel_pstate_exit(void)
-{
-int cpu;
-sysfs_remove_group(intel_pstate_kobject,
-&intel_pstate_attr_group);
-debugfs_remove_recursive(debugfs_parent);
-cpufreq_unregister_driver(&intel_pstate_driver);
-if (!all_cpu_data)
-return;
-get_online_cpus();
-for_each_online_cpu(cpu) {
-if (all_cpu_data[cpu]) {
-del_timer_sync(&all_cpu_data[cpu]->timer);
-kfree(all_cpu_data[cpu]);
-}
-}
-put_online_cpus();
-vfree(all_cpu_data);
-}
-module_exit(intel_pstate_exit);
static int __initdata no_load;
static int __init intel_pstate_init(void)
{
-int rc = 0;
+int cpu, rc = 0;
const struct x86_cpu_id *id;
if (no_load)
......@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
intel_pstate_sysfs_expose_params();
return rc;
out:
-intel_pstate_exit();
+get_online_cpus();
+for_each_online_cpu(cpu) {
+if (all_cpu_data[cpu]) {
+del_timer_sync(&all_cpu_data[cpu]->timer);
+kfree(all_cpu_data[cpu]);
+}
+}
+put_online_cpus();
+vfree(all_cpu_data);
return -ENODEV;
}
device_initcall(intel_pstate_init);
......
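Two things happen in the intel_pstate hunks: the VM guard (intel_pstate_set_policy() now bails out when cpuinfo.max_freq is unreported) and the error-path fix, which deletes intel_pstate_exit(). The driver is registered with device_initcall(), so it is never unloaded as a module, and on an init failure the removed exit routine would have torn down sysfs and debugfs state and unregistered a driver whose registration had just failed. A condensed, hypothetical sketch of the resulting unwind-what-succeeded idiom; all_cpu_data and intel_pstate_driver are the driver's own symbols from the diff above:

```c
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>

/* Condensed sketch of the fixed error path: tear down only the per-CPU
 * state that was actually set up before the failure, inside init itself. */
static int __init example_init(void)
{
	int cpu, rc;

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;
	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu)
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
```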
......@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
-static int __init pl320_probe(struct amba_device *adev,
-const struct amba_id *id)
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
......