#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct work_struct	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	void			*sched_data;
	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	wait_queue_t		dispatch_wait;
	atomic_t		wait_index;

	struct blk_mq_tags	*tags;
	struct blk_mq_tags	*sched_tags;

	struct srcu_struct	queue_rq_srcu;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct delayed_work	delayed_run_work;
	struct delayed_work	delay_work;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;
};

struct blk_mq_tag_set {
	unsigned int		*mq_map;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
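/*
 * Example (illustrative only, not part of the kernel API): a driver
 * typically fills in a blk_mq_tag_set and registers it once; "my_mq_ops"
 * and "struct my_cmd" are hypothetical driver-side names.
 *
 *	struct blk_mq_tag_set set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *
 *	if (blk_mq_alloc_tag_set(&set))
 *		return -ENOMEM;
 *	q = blk_mq_init_queue(&set);	(returns ERR_PTR() on failure)
 */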

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting
	 * up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	reinit_request_fn	*reinit_request;

	map_queues_fn		*map_queues;
};
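/*
 * Minimal ops-table sketch (illustrative; "my_queue_rq" and "my_hw_submit"
 * are hypothetical driver functions). ->queue_rq() returns one of the
 * BLK_MQ_RQ_QUEUE_* codes defined below.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (my_hw_submit(hctx->driver_data, rq))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */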

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,
	BLK_MQ_S_TAG_WAITING	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};

#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
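/*
 * Worked example: with BLK_MQ_F_ALLOC_POLICY_START_BIT == 8 and a single
 * policy bit, BLK_ALLOC_POLICY_TO_MQ_FLAG(1) evaluates to (1 << 8), and
 * BLK_MQ_FLAG_TO_ALLOC_POLICY() recovers that 0-or-1 policy value from a
 * tag set's BLK_MQ_F_* flags word.
 */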

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
	BLK_MQ_REQ_INTERNAL	= (1 << 2), /* allocate internal/sched tag */
};

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
		unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
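/*
 * Example (illustrative): allocating a request without sleeping, using the
 * BLK_MQ_REQ_* flags above; callers must check the ERR_PTR() return.
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_READ, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */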

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
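/*
 * Example: the unique tag packs the hardware queue index into the upper
 * 16 bits and the per-queue tag into the lower 16 bits, so
 * blk_mq_unique_tag_to_hwq(0x00020005) == 2 and
 * blk_mq_unique_tag_to_tag(0x00020005) == 5.
 */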

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
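/*
 * Typical usage (illustrative): freeze a queue to drain in-flight requests
 * before changing state that ->queue_rq() depends on, then unfreeze.
 *
 *	blk_mq_freeze_queue(q);
 *	... update driver or queue state ...
 *	blk_mq_unfreeze_queue(q);
 */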

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
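/*
 * Example (illustrative, with a hypothetical "struct my_cmd" sized via the
 * tag set's cmd_size): the PDU and its request are convertible in both
 * directions.
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	(rq2 == rq)
 */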

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
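/*
 * Usage sketch: walk every hardware queue of a request queue, then every
 * software context mapped to it.
 *
 *	queue_for_each_hw_ctx(q, hctx, i) {
 *		hctx_for_each_ctx(hctx, ctx, j)
 *			pr_debug("hwq %u ctx %u\n", i, j);
 *	}
 */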

#endif