/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};

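/*
 * Pairs a wait queue with the inflight counter it guards; throttled
 * submitters sleep here until rq_wait_inc_below() succeeds again.
 */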
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

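/*
 * One of these per registered policy, normally embedded in the policy's own
 * state and linked into the request_queue's rq_qos chain through @next.
 */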
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

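/*
 * Hooks a policy can implement; each is invoked through the corresponding
 * rq_qos_*() wrapper further down in this file:
 *
 *  @throttle:	bio is about to be submitted; may sleep to throttle it
 *  @track:	bio has been attached to a newly allocated request
 *  @merge:	bio has been merged into an existing request
 *  @issue:	request is being issued to the driver
 *  @requeue:	request has been requeued
 *  @done:	request has completed
 *  @done_bio:	bio has completed
 *  @cleanup:	bio is being thrown away without becoming a request
 *  @queue_depth_changed: the device queue depth changed
 *  @exit:	policy is being removed from the queue
 *  @debugfs_attrs: optional attributes shown under the policy's debugfs dir
 */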
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

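/*
 * Depth scaling state consumed by rq_depth_scale_up()/rq_depth_scale_down()
 * and rq_depth_calc_max_depth() below: @max_depth is the inflight limit
 * currently in force, recomputed from @queue_depth (or @default_depth when
 * the device does not report one) as @scale_step moves up and down.
 */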
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

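/* Walk the queue's policy chain and return the entry matching @id, if any. */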
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

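/*
 * Link @rqos at the head of the queue's policy chain and register its
 * debugfs attributes, if any. Nothing here takes a lock, so callers are
 * expected to serialize additions themselves. A minimal sketch of how a
 * policy might wire itself up (all names purely illustrative):
 *
 *	static struct rq_qos_ops example_ops = {
 *		.throttle	= example_throttle,
 *		.done		= example_done,
 *		.exit		= example_exit,
 *	};
 *
 *	example->rqos.ops = &example_ops;
 *	example->rqos.q = q;
 *	example->rqos.id = RQ_QOS_WBT;
 *	rq_qos_add(q, &example->rqos);
 */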
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

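/* Unlink @rqos from the queue's policy chain and drop its debugfs entries. */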
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = rqos->next;
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}

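/*
 * rq_qos_wait() parks the caller on @rqw until @acquire_inflight_cb manages
 * to take an inflight slot; @cleanup_cb gives a slot back if the sleeper
 * raced with a wakeup and ended up holding one too many.
 */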
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

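/*
 * The __rq_qos_*() helpers live in blk-rq-qos.c and walk the whole policy
 * chain, calling the matching hook on every policy that implements it.
 */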
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

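/*
 * The inline wrappers below keep the common case cheap: the out-of-line
 * walkers are only called when at least one policy is attached to the queue.
 */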
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif