#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)
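/* (HZ/50 jiffies is one 50th of a second, i.e. a batch window of roughly 20ms) */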

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
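
/*
 * A minimal usage sketch (not part of this header's interface): the normal
 * completion path is expected to claim the request first and back off if
 * the timeout handler already won the race, along the lines of
 *
 *	if (!blk_mark_rq_complete(rq))
 *		__blk_complete_request(rq);
 *
 * blk_clear_rq_complete() makes the request claimable again, e.g. when the
 * timeout handler decides to simply restart the timer.
 */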

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

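/*
 * Pull the next request for dispatch: hand back the head of ->queue_head
 * once blk_do_ordered() clears it past any barrier sequencing, otherwise
 * ask the io scheduler to dispatch more requests onto the queue and retry.
 */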
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

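/*
 * Tell the io scheduler, if it implements the hooks, that a request has
 * been handed to the driver (activate) or requeued back (deactivate).
 */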
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
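
/*
 * Sketch of how the two thresholds give hysteresis (the real call sites
 * live in the core request allocation code, and the helper names below are
 * only stand-ins): congestion is flagged when the number of allocated
 * requests reaches the "on" threshold and cleared only once it falls below
 * the lower "off" threshold, e.g.
 *
 *	if (nr_used >= queue_congestion_on_threshold(q))
 *		mark_congested(q);
 *	else if (nr_used < queue_congestion_off_threshold(q))
 *		mark_uncongested(q);
 */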

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
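
/*
 * Usage sketch (assumes the usual struct req_iterator/struct bio_vec
 * declarations from the block headers), e.g. to walk every integrity
 * segment of a request:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *iv;
 *
 *	rq_for_each_integrity_segment(iv, rq, iter)
 *		total += iv->bv_len;
 */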

#endif /* BLK_DEV_INTEGRITY */

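/*
 * Map a CPU to the group it shares siblings with: the first CPU of its
 * core group under CONFIG_SCHED_MC, its first SMT sibling under
 * CONFIG_SCHED_SMT, or the CPU itself when neither is configured.
 */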
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
}

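/*
 * Only do io accounting for requests that are backed by a gendisk and have
 * not had statistics disabled (blk_rq_io_stat() checks the per-request flag).
 */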
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && blk_rq_io_stat(rq);
}

#endif