提交 e459dd08 编写于 作者: C Corrado Zoccolo 提交者: Jens Axboe

cfq-iosched: fix ncq detection code

CFQ's detection of queueing devices initially assumes a queuing device
and detects if the queue depth reaches a certain threshold.
However, it will reconsider this choice periodically.

Unfortunately, if the device is considered non-queuing, CFQ will force a
queue depth of one for some workloads, thus defeating the detection logic.
This leads to poor performance on queuing hardware,
since the idle window remains enabled.

Given this premise, switching to hw_tag = 0 after we have proved at
least once that the device is NCQ capable is not a good choice.

The new detection code starts in an indeterminate state, in which CFQ behaves
as if hw_tag = 1, and then, if for a long observation period we never saw
large depth, we switch to hw_tag = 0, otherwise we stick to hw_tag = 1,
without reconsidering it again.
Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
上级 75e7b634
...@@ -191,8 +191,14 @@ struct cfq_data { ...@@ -191,8 +191,14 @@ struct cfq_data {
*/ */
int rq_queued; int rq_queued;
int hw_tag; int hw_tag;
int hw_tag_samples; /*
int rq_in_driver_peak; * hw_tag can be
* -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
* 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
* 0 => no NCQ
*/
int hw_tag_est_depth;
unsigned int hw_tag_samples;
/* /*
* idle window management * idle window management
...@@ -2518,8 +2524,11 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) ...@@ -2518,8 +2524,11 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
{ {
struct cfq_queue *cfqq = cfqd->active_queue; struct cfq_queue *cfqq = cfqd->active_queue;
if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak) if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
cfqd->rq_in_driver_peak = rq_in_driver(cfqd); cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
if (cfqd->hw_tag == 1)
return;
if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN) rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
...@@ -2538,13 +2547,10 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) ...@@ -2538,13 +2547,10 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
if (cfqd->hw_tag_samples++ < 50) if (cfqd->hw_tag_samples++ < 50)
return; return;
if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN) if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
cfqd->hw_tag = 1; cfqd->hw_tag = 1;
else else
cfqd->hw_tag = 0; cfqd->hw_tag = 0;
cfqd->hw_tag_samples = 0;
cfqd->rq_in_driver_peak = 0;
} }
static void cfq_completed_request(struct request_queue *q, struct request *rq) static void cfq_completed_request(struct request_queue *q, struct request *rq)
...@@ -2951,7 +2957,7 @@ static void *cfq_init_queue(struct request_queue *q) ...@@ -2951,7 +2957,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle; cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_latency = 1; cfqd->cfq_latency = 1;
cfqd->hw_tag = 1; cfqd->hw_tag = -1;
cfqd->last_end_sync_rq = jiffies; cfqd->last_end_sync_rq = jiffies;
return cfqd; return cfqd;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册