/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

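/*
 * Look up the per-hardware-queue flush state that flush requests issued
 * from @ctx are steered to.
 */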
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

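/* Grab an extra reference on the queue's embedded kobject. */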
static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

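/*
 * Two bio_vecs can share one physical segment if they are physically
 * contiguous, Xen (when active) does not veto the merge, and the combined
 * range does not straddle the queue's segment boundary mask.
 */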
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

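/*
 * A gap exists if the new vec does not start on, or the previous vec does
 * not end on, an address aligned to the queue's virt boundary.
 */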
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

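/*
 * Initialize the data-carrying fields of @rq from its first bio: segment
 * count, data length, bio list, I/O priority and backing disk.
 */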
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

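/*
 * Check whether merging the integrity payload of a bio into @req would
 * create a virt-boundary gap in the integrity scatterlist.
 */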
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

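/*
 * Free any requests still held by the I/O scheduler before tearing down
 * the elevator itself.
 */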
static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

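/* Bio splitting and request/bio merging helpers. */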
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

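/*
 * Mark @req as no longer eligible for merging and forget it as the
 * queue's cached merge candidate.
 */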
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it must be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns %current->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */