提交 30d7b944 编写于 作者: S Shaohua Li 提交者: Jens Axboe

block cfq: don't use atomic_t for cfq_queue

cfq_queue->ref is used with queue_lock held, so ref doesn't need to be atomic,
and atomic operations are slower.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
上级 b7908c10
...@@ -96,7 +96,7 @@ struct cfq_rb_root { ...@@ -96,7 +96,7 @@ struct cfq_rb_root {
*/ */
struct cfq_queue { struct cfq_queue {
/* reference count */ /* reference count */
atomic_t ref; int ref;
/* various state flags, see below */ /* various state flags, see below */
unsigned int flags; unsigned int flags;
/* parent cfq_data */ /* parent cfq_data */
...@@ -2025,7 +2025,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq) ...@@ -2025,7 +2025,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
int process_refs, io_refs; int process_refs, io_refs;
io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
process_refs = atomic_read(&cfqq->ref) - io_refs; process_refs = cfqq->ref - io_refs;
BUG_ON(process_refs < 0); BUG_ON(process_refs < 0);
return process_refs; return process_refs;
} }
...@@ -2065,10 +2065,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) ...@@ -2065,10 +2065,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
*/ */
if (new_process_refs >= process_refs) { if (new_process_refs >= process_refs) {
cfqq->new_cfqq = new_cfqq; cfqq->new_cfqq = new_cfqq;
atomic_add(process_refs, &new_cfqq->ref); new_cfqq->ref += process_refs;
} else { } else {
new_cfqq->new_cfqq = cfqq; new_cfqq->new_cfqq = cfqq;
atomic_add(new_process_refs, &cfqq->ref); cfqq->ref += new_process_refs;
} }
} }
...@@ -2532,9 +2532,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq) ...@@ -2532,9 +2532,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
struct cfq_data *cfqd = cfqq->cfqd; struct cfq_data *cfqd = cfqq->cfqd;
struct cfq_group *cfqg, *orig_cfqg; struct cfq_group *cfqg, *orig_cfqg;
BUG_ON(atomic_read(&cfqq->ref) <= 0); BUG_ON(cfqq->ref <= 0);
if (!atomic_dec_and_test(&cfqq->ref)) cfqq->ref--;
if (cfqq->ref)
return; return;
cfq_log_cfqq(cfqd, cfqq, "put_queue"); cfq_log_cfqq(cfqd, cfqq, "put_queue");
...@@ -2837,7 +2838,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, ...@@ -2837,7 +2838,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
RB_CLEAR_NODE(&cfqq->p_node); RB_CLEAR_NODE(&cfqq->p_node);
INIT_LIST_HEAD(&cfqq->fifo); INIT_LIST_HEAD(&cfqq->fifo);
atomic_set(&cfqq->ref, 0); cfqq->ref = 0;
cfqq->cfqd = cfqd; cfqq->cfqd = cfqd;
cfq_mark_cfqq_prio_changed(cfqq); cfq_mark_cfqq_prio_changed(cfqq);
...@@ -2973,11 +2974,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, ...@@ -2973,11 +2974,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
* pin the queue now that it's allocated, scheduler exit will prune it * pin the queue now that it's allocated, scheduler exit will prune it
*/ */
if (!is_sync && !(*async_cfqq)) { if (!is_sync && !(*async_cfqq)) {
atomic_inc(&cfqq->ref); cfqq->ref++;
*async_cfqq = cfqq; *async_cfqq = cfqq;
} }
atomic_inc(&cfqq->ref); cfqq->ref++;
return cfqq; return cfqq;
} }
...@@ -3679,7 +3680,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) ...@@ -3679,7 +3680,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
} }
cfqq->allocated[rw]++; cfqq->allocated[rw]++;
atomic_inc(&cfqq->ref); cfqq->ref++;
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
...@@ -3860,6 +3861,10 @@ static void *cfq_init_queue(struct request_queue *q) ...@@ -3860,6 +3861,10 @@ static void *cfq_init_queue(struct request_queue *q)
if (!cfqd) if (!cfqd)
return NULL; return NULL;
/*
* Don't need take queue_lock in the routine, since we are
* initializing the ioscheduler, and nobody is using cfqd
*/
cfqd->cic_index = i; cfqd->cic_index = i;
/* Init root service tree */ /* Init root service tree */
...@@ -3899,7 +3904,7 @@ static void *cfq_init_queue(struct request_queue *q) ...@@ -3899,7 +3904,7 @@ static void *cfq_init_queue(struct request_queue *q)
* will not attempt to free it. * will not attempt to free it.
*/ */
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
atomic_inc(&cfqd->oom_cfqq.ref); cfqd->oom_cfqq.ref++;
cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
INIT_LIST_HEAD(&cfqd->cic_list); INIT_LIST_HEAD(&cfqd->cic_list);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册