/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
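
/*
 * Illustrative sketch (not part of the original file): a bus or driver
 * could use dev_pm_qos_flags() to check whether any request asks that
 * power not be removed from the device, and refuse to power it off if so.
 * PM_QOS_FLAG_NO_POWER_OFF comes from <linux/pm_qos.h>; "dev" is assumed
 * to be a valid struct device pointer.
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			!= PM_QOS_FLAGS_NONE)
 *		return -EBUSY;
 */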

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
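
/*
 * Illustrative usage sketch (assumed caller code, not part of the original
 * file): a driver embeds a dev_pm_qos_request in its private data, adds a
 * resume latency constraint, tightens it later and drops it on teardown.
 * The 500/100 microsecond values are arbitrary examples.
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 500);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	dev_pm_qos_update_request(&my_req, 100);
 *	...
 *	dev_pm_qos_remove_request(&my_req);
 */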

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - modifies an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
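
/*
 * Illustrative watcher sketch (assumed caller code, not part of the
 * original file): the callback receives the new aggregate resume latency
 * target in "val" whenever it changes.  "my_qos_notify" and "my_nb" are
 * hypothetical names.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long val, void *ptr)
 *	{
 *		(react to the new constraint value in "val")
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &my_nb);
 *	...
 *	dev_pm_qos_remove_notifier(dev, &my_nb);
 */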

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
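
/*
 * Illustrative sketch (assumed caller code, not part of the original
 * file): a child device constrains the first ancestor that has
 * ignore_children set.  "client" is a hypothetical child device and
 * 100 us is an arbitrary example value.
 *
 *	static struct dev_pm_qos_request anc_req;
 *
 *	ret = dev_pm_qos_add_ancestor_request(&client->dev, &anc_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 */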

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
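
/*
 * Illustrative sketch (assumed bus/driver code, not part of the original
 * file): exposing the limit creates a writable per-device sysfs attribute
 * (pm_qos_resume_latency_us under the device's power directory), seeded
 * here with the subsystem default.
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev,
 *				PM_QOS_RESUME_LATENCY_DEFAULT_VALUE);
 */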

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
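
/*
 * Illustrative sketch (assumed caller code, not part of the original
 * file): once the flags request has been exposed, kernel code can flip
 * individual flags through it, e.g. to forbid removing power from the
 * device:
 *
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 */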

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
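
/*
 * Illustrative sketch (assumed caller code, not part of the original
 * file): a non-negative value installs or updates the user space latency
 * tolerance request; a negative value drops it, with
 * PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT accepted as "no constraint".
 * The 20 us value is an arbitrary example.
 *
 *	ret = dev_pm_qos_update_user_latency_tolerance(dev, 20);
 *	...
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */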

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);