/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>

/*
 * On CONFIG_DRM_I915_DEBUG builds, wakeref assertions are promoted to hard
 * BUG_ON()s; otherwise the expression is only checked for validity at
 * compile time (BUILD_BUG_ON_INVALID) and generates no runtime code.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

22
struct intel_runtime_pm;
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;
	intel_wakeref_t wakeref;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct lock_class_key *key);
/*
 * Give each intel_wakeref_init() call-site its own static lock_class_key,
 * so lockdep treats every wakeref mutex as a distinct lock class.
 */
#define intel_wakeref_init(wf) do {					\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), &__key);				\
} while (0)

/* Slow paths: invoked only on the 0 -> 1 and 1 -> 0 count transitions. */
int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
			      struct intel_wakeref *wf,
			      int (*fn)(struct intel_wakeref *wf));
int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
			     struct intel_wakeref *wf,
			     int (*fn)(struct intel_wakeref *wf));

/**
 * intel_wakeref_get: Acquire the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @fn: callback for acquired the wakeref, called only on first acquire.
 *
 * Acquire a hold on the wakeref. The first user to do so, will acquire
 * the runtime pm wakeref and then call the @fn underneath the wakeref
 * mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * will be released and the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
64
intel_wakeref_get(struct intel_runtime_pm *rpm,
65 66 67 68
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
69
		return __intel_wakeref_get_first(rpm, wf, fn);
70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90

	return 0;
}

/**
 * intel_wakeref_put: Release the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @fn: callback for releasing the wakeref, called only on final release.
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @fn callback is called
 * underneath the wakeref mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * is retained and an error reported.
 *
 * Returns: 0 if the wakeref was released successfully, or a negative error
 * code otherwise.
 */
static inline int
91
intel_wakeref_put(struct intel_runtime_pm *rpm,
92 93 94
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
95
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
96
	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
97
		return __intel_wakeref_put_last(rpm, wf, fn);
98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130

	return 0;
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)	/* sparse annotation: mutex held on return */
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)	/* sparse annotation: mutex dropped here */
{
	mutex_unlock(&wf->mutex);
}

/**
131
 * intel_wakeref_is_active: Query whether the wakeref is currently held
132 133 134 135 136
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
137
intel_wakeref_is_active(const struct intel_wakeref *wf)
138
{
139
	return READ_ONCE(wf->wakeref);
140 141
}

142
struct intel_wakeref_auto {
143
	struct intel_runtime_pm *rpm;
144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */