Commit cdf8a6fb authored by Adrian Hunter, committed by Ulf Hansson

mmc: block: Introduce queue semantics

Change from viewing the requests in progress as 'current' and 'previous',
to viewing them as a queue. The current request is allocated to the first
free slot. The presence of incomplete requests is determined from the
count (mq->qcnt) of entries in the queue. Non-read-write requests (i.e.
discards and flushes) are not added to the queue at all and require no
special handling. Also no special handling is needed for the
MMC_BLK_NEW_REQUEST case.

Besides allowing an arbitrarily sized queue, this also makes the queue
thread function significantly simpler.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Parent 8ddfe07e
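
Before reading the diff, it may help to see the slot semantics in
isolation. The following stand-alone C sketch is illustrative only, not
kernel code: it mimics the mmc_queue_req_find()/mmc_queue_req_free() pair
introduced below, with portable bit arithmetic standing in for the
kernel's ffz(), test_bit(), __set_bit() and __clear_bit(), and with
invented names (struct queue, slot_find, slot_free) for the example.

#include <stdio.h>

#define QDEPTH 2

struct slot {
	int task_id;
	int in_use;		/* stands in for mqrq->req */
};

struct queue {
	struct slot slots[QDEPTH];
	unsigned long qslots;	/* in-use bitmap, like mq->qslots */
	int qcnt;		/* outstanding requests, like mq->qcnt */
};

/* Index of the first zero bit, a portable stand-in for ffz(). */
static int first_free(unsigned long bits)
{
	int i;

	for (i = 0; i < QDEPTH; i++)
		if (!(bits & (1UL << i)))
			return i;
	return QDEPTH;
}

/* Mirrors mmc_queue_req_find(): claim the first free slot. */
static struct slot *slot_find(struct queue *q)
{
	int i = first_free(q->qslots);

	if (i >= QDEPTH)
		return NULL;		/* queue full */
	q->slots[i].in_use = 1;
	q->qslots |= 1UL << i;		/* __set_bit() */
	q->qcnt++;
	return &q->slots[i];
}

/* Mirrors mmc_queue_req_free(): return a slot to the pool. */
static void slot_free(struct queue *q, struct slot *s)
{
	s->in_use = 0;
	q->qslots &= ~(1UL << s->task_id);	/* __clear_bit() */
	q->qcnt--;
}

int main(void)
{
	struct queue q = { .slots = { { .task_id = 0 }, { .task_id = 1 } } };
	struct slot *a = slot_find(&q);
	struct slot *b = slot_find(&q);

	printf("qcnt=%d, third find fails: %d\n", q.qcnt, slot_find(&q) == NULL);
	slot_free(&q, a);
	slot_free(&q, b);
	printf("qcnt=%d (queue drained)\n", q.qcnt);
	return 0;
}
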
@@ -129,6 +129,13 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 			      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static void mmc_blk_requeue(struct request_queue *q, struct request *req)
+{
+	spin_lock_irq(q->queue_lock);
+	blk_requeue_request(q, req);
+	spin_unlock_irq(q->queue_lock);
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -1585,11 +1592,14 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 	return req_pending;
 }
 
-static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
+static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
+				 struct request *req,
+				 struct mmc_queue_req *mqrq)
 {
 	if (mmc_card_removed(card))
 		req->rq_flags |= RQF_QUIET;
 	while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
+	mmc_queue_req_free(mq, mqrq);
 }
 
 /**
@@ -1609,6 +1619,7 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
 	if (mmc_card_removed(mq->card)) {
 		req->rq_flags |= RQF_QUIET;
 		blk_end_request_all(req, -EIO);
+		mmc_queue_req_free(mq, mqrq);
 		return;
 	}
 	/* Else proceed and try to restart the current async request */
@@ -1623,14 +1634,23 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 	struct mmc_blk_request *brq;
 	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
 	enum mmc_blk_status status;
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_cur = NULL;
 	struct mmc_queue_req *mq_rq;
 	struct request *old_req;
 	struct mmc_async_req *new_areq;
 	struct mmc_async_req *old_areq;
 	bool req_pending = true;
 
-	if (!new_req && !mq->mqrq_prev->req)
+	if (new_req) {
+		mqrq_cur = mmc_queue_req_find(mq, new_req);
+		if (!mqrq_cur) {
+			WARN_ON(1);
+			mmc_blk_requeue(mq->queue, new_req);
+			new_req = NULL;
+		}
+	}
+
+	if (!mq->qcnt)
 		return;
 
 	do {
@@ -1643,7 +1663,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
 			pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 			       new_req->rq_disk->disk_name);
-			mmc_blk_rw_cmd_abort(card, new_req);
+			mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
 			return;
 		}
@@ -1659,8 +1679,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			 * and there is nothing more to do until it is
 			 * complete.
 			 */
-			if (status == MMC_BLK_NEW_REQUEST)
-				mq->new_request = true;
 			return;
 		}
@@ -1693,7 +1711,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 				pr_err("%s BUG rq_tot %d d_xfer %d\n",
 				       __func__, blk_rq_bytes(old_req),
 				       brq->data.bytes_xfered);
-				mmc_blk_rw_cmd_abort(card, old_req);
+				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 				return;
 			}
 			break;
@@ -1701,11 +1719,14 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
 			if (mmc_blk_reset(md, card->host, type)) {
 				if (req_pending)
-					mmc_blk_rw_cmd_abort(card, old_req);
+					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+				else
+					mmc_queue_req_free(mq, mq_rq);
 				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			if (!req_pending) {
+				mmc_queue_req_free(mq, mq_rq);
 				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
@@ -1718,7 +1739,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		case MMC_BLK_ABORT:
 			if (!mmc_blk_reset(md, card->host, type))
 				break;
-			mmc_blk_rw_cmd_abort(card, old_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		case MMC_BLK_DATA_ERR: {
@@ -1728,7 +1749,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			if (!err)
 				break;
 			if (err == -ENODEV) {
-				mmc_blk_rw_cmd_abort(card, old_req);
+				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
@@ -1750,18 +1771,19 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 				req_pending = blk_end_request(old_req, -EIO,
 							      brq->data.blksz);
 				if (!req_pending) {
+					mmc_queue_req_free(mq, mq_rq);
 					mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 					return;
 				}
 			break;
 		case MMC_BLK_NOMEDIUM:
-			mmc_blk_rw_cmd_abort(card, old_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		default:
 			pr_err("%s: Unhandled return value (%d)",
 			       old_req->rq_disk->disk_name, status);
-			mmc_blk_rw_cmd_abort(card, old_req);
+			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 			return;
 		}
@@ -1778,6 +1800,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			mq_rq->brq.retune_retry_done = retune_retry_done;
 		}
 	} while (req_pending);
+
+	mmc_queue_req_free(mq, mq_rq);
 }
 
 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
@@ -1785,9 +1809,8 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
-	bool req_is_special = mmc_req_is_special(req);
 
-	if (req && !mq->mqrq_prev->req)
+	if (req && !mq->qcnt)
 		/* claim host only for the first request */
 		mmc_get_card(card);
@@ -1799,20 +1822,19 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
-	mq->new_request = false;
 	if (req && req_op(req) == REQ_OP_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_discard_rq(mq, req);
 	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
 		/* complete ongoing async transfer before issuing secure erase*/
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_secdiscard_rq(mq, req);
 	} else if (req && req_op(req) == REQ_OP_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		mmc_blk_issue_flush(mq, req);
 	} else {
@@ -1821,13 +1843,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	}
 
 out:
-	if ((!req && !mq->new_request) || req_is_special)
-		/*
-		 * Release host when there are no more requests
-		 * and after special request(discard, flush) is done.
-		 * In case sepecial request, there is no reentry to
-		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
-		 */
+	if (!mq->qcnt)
 		mmc_put_card(card);
 }
......
@@ -40,6 +40,35 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
+struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
+					 struct request *req)
+{
+	struct mmc_queue_req *mqrq;
+	int i = ffz(mq->qslots);
+
+	if (i >= mq->qdepth)
+		return NULL;
+
+	mqrq = &mq->mqrq[i];
+	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
+		test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = req;
+	mq->qcnt += 1;
+	__set_bit(mqrq->task_id, &mq->qslots);
+
+	return mqrq;
+}
+
+void mmc_queue_req_free(struct mmc_queue *mq,
+			struct mmc_queue_req *mqrq)
+{
+	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
+		!test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = NULL;
+	mq->qcnt -= 1;
+	__clear_bit(mqrq->task_id, &mq->qslots);
+}
+
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -50,7 +79,7 @@ static int mmc_queue_thread(void *d)
 	down(&mq->thread_sem);
 	do {
-		struct request *req = NULL;
+		struct request *req;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -63,38 +92,17 @@ static int mmc_queue_thread(void *d)
 			 * Dispatch queue is empty so set flags for
 			 * mmc_request_fn() to wake us up.
 			 */
-			if (mq->mqrq_prev->req)
+			if (mq->qcnt)
 				cntx->is_waiting_last_req = true;
 			else
 				mq->asleep = true;
 		}
-		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (req || mq->mqrq_prev->req) {
-			bool req_is_special = mmc_req_is_special(req);
-
+		if (req || mq->qcnt) {
 			set_current_state(TASK_RUNNING);
 			mmc_blk_issue_rq(mq, req);
 			cond_resched();
-			if (mq->new_request) {
-				mq->new_request = false;
-				continue; /* fetch again */
-			}
-
-			/*
-			 * Current request becomes previous request
-			 * and vice versa.
-			 * In case of special requests, current request
-			 * has been finished. Do not assign it to previous
-			 * request.
-			 */
-			if (req_is_special)
-				mq->mqrq_cur->req = NULL;
-
-			mq->mqrq_prev->brq.mrq.data = NULL;
-			mq->mqrq_prev->req = NULL;
-			swap(mq->mqrq_prev, mq->mqrq_cur);
 		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
@@ -177,6 +185,20 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
+{
+	struct mmc_queue_req *mqrq;
+	int i;
+
+	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
+	if (mqrq) {
+		for (i = 0; i < qdepth; i++)
+			mqrq[i].task_id = i;
+	}
+
+	return mqrq;
+}
+
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 					unsigned int bouncesz)
@@ -279,12 +301,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		return -ENOMEM;
 
 	mq->qdepth = 2;
-	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
-			   GFP_KERNEL);
+	mq->mqrq = mmc_queue_alloc_mqrqs(mq->qdepth);
 	if (!mq->mqrq)
 		goto blk_cleanup;
-	mq->mqrq_cur = &mq->mqrq[0];
-	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
......
@@ -34,21 +34,21 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	areq;
+	int			task_id;
 };
 
 struct mmc_queue {
 	struct mmc_card		*card;
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
-	bool			new_request;
 	bool			suspended;
 	bool			asleep;
 	struct mmc_blk_data	*blkdata;
 	struct request_queue	*queue;
 	struct mmc_queue_req	*mqrq;
-	struct mmc_queue_req	*mqrq_cur;
-	struct mmc_queue_req	*mqrq_prev;
 	int			qdepth;
+	int			qcnt;
+	unsigned long		qslots;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -64,4 +64,8 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
+extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
+						struct request *);
+extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);
+
 #endif
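
As a closing note, the host claim/release rule that replaces the old
new_request/req_is_special bookkeeping condenses to two tests in
mmc_blk_issue_rq(): claim the host when the first request arrives
(req && !mq->qcnt) and release it once the queue count drops back to
zero (!mq->qcnt after issuing). The toy C program below models only that
decision; every name apart from the quoted conditions is invented for
the example, and in the real driver completion happens asynchronously
inside mmc_blk_issue_rw_rq().

#include <stdbool.h>
#include <stdio.h>

static int qcnt;	/* stand-in for mq->qcnt */

/* Models only the claim/release decision in mmc_blk_issue_rq(). */
static void issue_rq(bool have_req)
{
	if (have_req && !qcnt)
		printf("mmc_get_card()  /* claim host for first request */\n");

	if (have_req)
		qcnt++;		/* mmc_queue_req_find() took a slot */
	else if (qcnt)
		qcnt--;		/* an outstanding request completed */

	if (!qcnt)
		printf("mmc_put_card()  /* queue drained, release host */\n");
}

int main(void)
{
	issue_rq(true);		/* first request: host claimed */
	issue_rq(true);		/* second request queued behind it */
	issue_rq(false);	/* NULL request: wait for a completion */
	issue_rq(false);	/* queue drains: host released */
	return 0;
}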