/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with this rq; the two can't be
	 * active at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
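
/*
 * Illustrative sketch only (hypothetical caller, not part of the block
 * core): once QUEUE_FLAG_INIT_DONE is set, a flag is flipped under the
 * queue_lock so the assertions in the helpers above hold.
 * QUEUE_FLAG_NOMERGES is just an example flag.
 */
static inline void example_queue_set_nomerges(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);
}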

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}
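
/*
 * Illustrative sketch only (hypothetical caller): a reference taken with
 * blk_queue_enter_live() is still dropped through blk_queue_exit(), just
 * like one obtained via blk_queue_enter().
 */
static inline void example_queue_ref_from_submit_path(struct request_queue *q)
{
	blk_queue_enter_live(q);
	/* ... use the queue while holding the extra usage reference ... */
	blk_queue_exit(q);
}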

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
	return test_bit(0, &rq->__deadline);
}
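
/*
 * Illustrative sketch only (hypothetical helper): the timeout handler and
 * the normal completion path both call blk_mark_rq_complete(); whichever
 * sets the stolen bit first owns the completion, and the other backs off.
 */
static inline bool example_try_own_completion(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return false;	/* already claimed by the other path */
	/* ... proceed to complete or time out the request ... */
	return true;
}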

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req, 
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
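
/*
 * Illustrative sketch only (hypothetical helper): the two thresholds are
 * meant to be used as a pair, so a queue that has just become congested
 * must drain below the lower "off" threshold before it is considered
 * uncongested again.
 */
static inline bool example_update_congested(struct request_queue *q,
					    int in_flight, bool congested)
{
	if (!congested && in_flight >= queue_congestion_on_threshold(q))
		return true;
	if (congested && in_flight < queue_congestion_off_threshold(q))
		return false;
	return congested;
}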

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 * setting the deadline clears the bottom bit, potentially clearing the
 * completed bit. The user has to be OK with this (current ones are fine).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
	return rq->__deadline & ~0x1UL;
}
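
/*
 * Illustrative sketch only (hypothetical helper): because bit 0 of
 * __deadline doubles as the "complete" mark, a request is (re)armed by
 * stamping its deadline first and can then be scanned for expiry with the
 * accessors above.
 */
static inline bool example_rq_expired(struct request *rq, unsigned long now)
{
	return !blk_rq_is_complete(rq) &&
	       time_after_eq(now, blk_rq_deadline(rq));
}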

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
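
/*
 * Illustrative sketch only (hypothetical helper): make sure %current has an
 * io_context and take a reference on it; the caller is expected to drop the
 * reference again with put_io_context().
 */
static inline struct io_context *example_get_current_ioc(gfp_t gfp_mask,
							 int node)
{
	struct io_context *ioc = create_io_context(gfp_mask, node);

	if (ioc)
		get_io_context(ioc);
	return ioc;
}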

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#endif /* BLK_INTERNAL_H */