/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of QoS value: get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */
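
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a "dependent" that cannot tolerate more than 100 us of resume latency
 * could register and drop a request as below; my_req, my_probe and
 * my_remove are hypothetical names.
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		int ret = dev_pm_qos_add_request(dev, &my_req,
 *						 DEV_PM_QOS_RESUME_LATENCY,
 *						 100);
 *		return ret < 0 ? ret : 0;	(1 and 0 both mean success)
 *	}
 *
 *	static void my_remove(struct device *dev)
 *	{
 *		dev_pm_qos_remove_request(&my_req);
 *	}
 */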

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
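
/*
 * Illustrative sketch (editor's example): a caller deciding whether a device
 * may be powered off could check the PM_QOS_FLAG_NO_POWER_OFF flag; the
 * surrounding code is hypothetical.
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			> PM_QOS_FLAGS_NONE)
 *		return -EBUSY;	(at least one request forbids power off)
 */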

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
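
/*
 * Illustrative sketch (editor's example): tightening a previously added
 * request; my_req is assumed to have been registered with
 * dev_pm_qos_add_request().  A return value of 1 means the aggregated
 * constraint changed, 0 that it did not.
 *
 *	ret = dev_pm_qos_update_request(&my_req, 50);
 *	if (ret < 0)
 *		dev_err(dev, "cannot update PM QoS request: %d\n", ret);
 */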

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
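
/*
 * Illustrative sketch (editor's example): a "watcher" tracking the
 * aggregated resume latency target; my_qos_notify and my_nb are
 * hypothetical names.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		(value is the new aggregated resume latency target)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_nb);
 */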

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
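
/*
 * Illustrative sketch (editor's example): an I2C device whose wakeup
 * handling depends on its parent controller can constrain the ancestor
 * instead of itself; client and my_req are hypothetical.
 *
 *	ret = dev_pm_qos_add_ancestor_request(&client->dev, &my_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 */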

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
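
/*
 * Illustrative sketch (editor's example): after this call user space can
 * adjust the limit via the device's power/pm_qos_resume_latency_us sysfs
 * attribute; the initial 100 us value is arbitrary.
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, 100);
 */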

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);