#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
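
/*
 * Illustrative sketch only (not part of this header): one way the helpers
 * above would be used by a completion path and by the error-handling timer
 * racing for the same request.  The function name and the 'done' callback
 * are hypothetical; whichever caller wins the test_and_set_bit() inside
 * blk_mark_rq_complete() owns the request, and the loser must not touch it
 * again.
 */
static inline void example_complete_request(struct request *rq,
					    void (*done)(struct request *rq))
{
	if (blk_mark_rq_complete(rq))
		return;		/* the other side already claimed rq */

	done(rq);		/* safe: we are the sole owner now */
}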

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

struct request *blk_do_flush(struct request_queue *q, struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
			    (rq->cmd_flags & REQ_FLUSH_SEQ))
				return rq;
			rq = blk_do_flush(q, rq);
			if (rq)
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
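
/*
 * Illustrative sketch only: how the two thresholds above give hysteresis.
 * The function below is hypothetical and assumes the per-queue congestion
 * helpers of this kernel generation (blk_set_queue_congested() /
 * blk_clear_queue_congested()); 'nr_used' stands for the number of requests
 * currently allocated for one direction.  Congestion switches on at the
 * higher threshold and only switches off again below the lower one, so the
 * state does not flap on small fluctuations.
 */
static inline void example_update_congestion(struct request_queue *q,
					     int sync, int nr_used)
{
	if (nr_used >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, sync);
	else if (nr_used < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);
}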

static inline int blk_cpu_to_group(int cpu)
{
	int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	group = cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
	if (likely(group < NR_CPUS))
		return group;
	return cpu;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
	        (rq->cmd_flags & REQ_DISCARD));
}

#endif