Commit 7749a8d4 authored by Jens Axboe

[PATCH] Propagate down request sync flag

We need to do this, otherwise the io schedulers don't get access to the
sync flag. Then they cannot tell the difference between a regular write
and an O_DIRECT write, which can cause a performance loss.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent 445722f9
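To see what is being propagated, here is a minimal userspace sketch (not part of the patch; the REQ_RW/REQ_RW_SYNC bit values are illustrative stand-ins for the kernel's definitions) of how the submission path folds the sync bit into the rw flags that the request allocator and io scheduler receive:

```c
#include <stdio.h>

/* Illustrative flag bits; the real values live in the kernel headers. */
#define REQ_RW       (1 << 0)  /* request is a write */
#define REQ_RW_SYNC  (1 << 1)  /* request is synchronous (e.g. O_DIRECT write) */

/* Mirrors what __make_request does below, before allocating the request. */
static int build_rw_flags(int is_write, int is_sync)
{
	int rw_flags = is_write ? REQ_RW : 0;

	if (is_sync)
		rw_flags |= REQ_RW_SYNC;	/* now visible to elv_may_queue() etc. */
	return rw_flags;
}

int main(void)
{
	printf("buffered write: %#x\n", build_rw_flags(1, 0)); /* 0x1 */
	printf("O_DIRECT write: %#x\n", build_rw_flags(1, 1)); /* 0x3 */
	printf("read:           %#x\n", build_rw_flags(0, 0)); /* 0x0 */
	return 0;
}
```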
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and synchronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
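The effect of the new check, as a toy model (CFQ_KEY_ASYNC and the flag bits are stand-ins for the kernel constants): reads and synchronous writes are keyed to the submitting task's pid, while buffered writes all collapse onto one shared async key:

```c
#include <stdio.h>

#define REQ_RW        (1 << 0)  /* illustrative write bit */
#define REQ_RW_SYNC   (1 << 1)  /* illustrative sync bit */
#define CFQ_KEY_ASYNC (-1)      /* stand-in for the shared async key */

static int queue_key(int pid, int rw, int is_sync)
{
	/* reads (!(rw & REQ_RW)) and sync writes get the per-process queue */
	if (!(rw & REQ_RW) || is_sync)
		return pid;
	return CFQ_KEY_ASYNC;	/* all async writes share one queue */
}

int main(void)
{
	printf("%d\n", queue_key(42, 0, 0));       /* 42: read */
	printf("%d\n", queue_key(42, REQ_RW, 1));  /* 42: sync write */
	printf("%d\n", queue_key(42, REQ_RW, 0));  /* -1: async write */
	return 0;
}
```

This is why exposing the sync bit matters: without it, an O_DIRECT write would land in the shared async queue and lose per-process service.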
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
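Here is_sync is taken from the request itself rather than derived backwards from the queue key. In this kernel series rq_is_sync() treats reads and REQ_RW_SYNC-tagged writes as synchronous; the following is a paraphrased, self-contained sketch of that helper (the struct layout and flag values are simplified assumptions, not the kernel's definitions):

```c
#include <stdio.h>

#define READ          0
#define REQ_RW        (1 << 0)
#define REQ_RW_SYNC   (1 << 1)

struct request { unsigned int cmd_flags; };

/* direction is the low bit of the flags */
#define rq_data_dir(rq)  ((rq)->cmd_flags & 1)

/* a request is sync if it is a read, or a write tagged REQ_RW_SYNC */
#define rq_is_sync(rq) \
	(rq_data_dir(rq) == READ || ((rq)->cmd_flags & REQ_RW_SYNC))

int main(void)
{
	struct request buffered = { .cmd_flags = REQ_RW };
	struct request direct   = { .cmd_flags = REQ_RW | REQ_RW_SYNC };

	printf("buffered write sync? %d\n", rq_is_sync(&buffered)); /* 0 */
	printf("O_DIRECT write sync? %d\n", rq_is_sync(&direct));   /* 1 */
	return 0;
}
```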
......
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
 /*
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
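get_request() now passes the full rw_flags through to elv_may_queue() so the elevator can see the sync bit, while masking out just the low bit for its own read/write free-list accounting. A small sketch of that split (flag values are illustrative):

```c
#include <stdio.h>

#define REQ_RW       (1 << 0)   /* illustrative: write direction bit */
#define REQ_RW_SYNC  (1 << 1)   /* illustrative: synchronous bit */

int main(void)
{
	int rw_flags = REQ_RW | REQ_RW_SYNC;	/* a synchronous write */

	/* the elevator sees the full flags ... */
	int elevator_view = rw_flags;

	/* ... while the request_list accounting only needs the direction */
	const int rw = rw_flags & 0x01;

	printf("elevator sees %#x, rl->count[] indexed by %d\n",
	       elevator_view, rw);
	return 0;
}
```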
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
@@ -2983,11 +2986,20 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	}
 
 get_rq:
+	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
 	/*
 	 * Grab a free request. This might sleep but cannot fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
......