/*
 * blk-mq-tag.h: block multi-queue tag allocation — internal interface.
 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

enum {
	BT_WAIT_QUEUES	= 8,	/* wait queues per bitmap tag map (bs[] size, presumably) */
	BT_WAIT_BATCH	= 8,	/* wake-up batching factor — TODO confirm against wake_cnt users */
};

/*
 * Per-wait-queue state for tasks waiting on a free tag.  Each instance
 * sits on its own cacheline so independent waiters do not false-share.
 */
struct bt_wait_state {
	atomic_t wait_cnt;		/* countdown until this queue is woken */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/*
 * Split a tag number into its word index and bit offset within that
 * word.  Note that @bt->bits_per_word is the *log2* of the number of
 * tags tracked per map word: it is used directly as a shift count and
 * as a mask width below.
 */
#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))

struct blk_mq_bitmap_tags {
	unsigned int depth;
	unsigned int wake_cnt;
22
	unsigned int bits_per_word;
23 24

	unsigned int map_nr;
25
	struct blk_align_bitmap *map;
26

27
	atomic_t wake_index;
28 29
	struct bt_wait_state *bs;
};

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;

38 39
	atomic_t active_queues;

40 41
	struct blk_mq_bitmap_tags bitmap_tags;
	struct blk_mq_bitmap_tags breserved_tags;
42 43 44

	struct request **rqs;
	struct list_head page_list;
S
Shaohua Li 已提交
45 46

	int alloc_policy;
K
Keith Busch 已提交
47
	cpumask_var_t cpumask;
48 49
};

/* Allocation and teardown of a tag map. */
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);

/* Per-request tag acquisition and release. */
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
			   unsigned int *last_tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags,
				     unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags,
				   unsigned int depth);
/* Wake all tag waiters; the bool presumably selects whether reserved
 * waiters are included — confirm against the definition. */
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);

/*
 * Bounds on the cached last-allocated tag hint — presumably clamps the
 * value handed to blk_mq_tag_init_last_tag(); verify against blk-mq.c.
 */
enum {
	BLK_MQ_TAG_CACHE_MIN	= 1,
	BLK_MQ_TAG_CACHE_MAX	= 64,
};

enum {
	BLK_MQ_TAG_FAIL		= -1U,			/* sentinel: tag allocation failed */
	BLK_MQ_TAG_MIN		= BLK_MQ_TAG_CACHE_MIN,	/* smallest valid tag count */
	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,	/* largest value not colliding
							 * with the FAIL sentinel */
};

/* Slow paths for the shared-tag accounting below; defined in blk-mq-tag.c. */
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

/*
 * Mark this hw queue as actively using its (shared) tags.  A no-op
 * returning false unless the queue participates in tag sharing.
 */
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_SHARED)
		return __blk_mq_tag_busy(hctx);

	return false;
}

/*
 * Mark this hw queue as no longer using its (shared) tags.  Only does
 * anything when the queue participates in tag sharing.
 */
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_SHARED)
		__blk_mq_tag_idle(hctx);
}

#endif