#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
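
/*
 * Illustrative sketch only (blk_try_complete() is a hypothetical wrapper,
 * and __blk_complete_request() stands in for whatever winner-only work the
 * caller performs): both the timeout handler and the normal completion
 * path are expected to claim the request first and back off if the other
 * side already won the race:
 *
 *	void blk_try_complete(struct request *rq)
 *	{
 *		if (blk_mark_rq_complete(rq))
 *			return;
 *		__blk_complete_request(rq);
 *	}
 */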

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		/* Hand out the first queued request that ordering allows */
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		/* Dispatch list is empty: ask the elevator for more work */
		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
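
/*
 * Illustrative sketch only (rl, rw and the blk_*_queue_congested() helpers
 * are assumed to exist in the caller's context, not in this header): the
 * request allocator is expected to use the two thresholds asymmetrically,
 * marking the queue congested once usage reaches the "on" threshold and
 * clearing that state only after usage drops below the "off" threshold:
 *
 *	if (rl->count[rw] >= queue_congestion_on_threshold(q))
 *		blk_set_queue_congested(q, rw);
 *	else if (rl->count[rw] < queue_congestion_off_threshold(q))
 *		blk_clear_queue_congested(q, rw);
 */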

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* CONFIG_BLK_DEV_INTEGRITY */
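
/*
 * Illustrative sketch only (rq, iv, iter and bytes are hypothetical
 * locals): walking every integrity segment attached to a request, e.g. to
 * total up the protection-information bytes it carries:
 *
 *	struct bio_vec *iv;
 *	struct req_iterator iter;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_integrity_segment(iv, rq, iter)
 *		bytes += iv->bv_len;
 */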

static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && blk_rq_io_stat(rq) &&
	       (blk_fs_request(rq) || blk_discard_rq(rq));
}
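
/*
 * Illustrative sketch only (req, cpu and part are hypothetical locals
 * supplied by the accounting caller): completion-time accounting is
 * expected to be gated on this helper so statistics are only touched when
 * all three conditions above hold:
 *
 *	if (blk_do_io_stat(req))
 *		part_stat_inc(cpu, part, ios[rq_data_dir(req)]);
 */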

#endif /* BLK_INTERNAL_H */