Commit 1919631e authored by Paolo Bonzini, committed by Stefan Hajnoczi

block: explicitly acquire aiocontext in bottom halves that need it

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-15-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

Parent: 9d456654
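
Every hunk below applies one idea: bottom halves used to rely on the AioContext lock being taken for them by the caller (aio_bh_poll), and with that outer lock removed they must acquire the context themselves around any completion callback or coroutine re-entry. A minimal sketch of the resulting shape, assuming a hypothetical driver whose MyAIOCB embeds the standard BlockAIOCB common part; bdrv_get_aio_context(), aio_context_acquire()/aio_context_release() and qemu_aio_unref() are the real QEMU primitives this patch inserts, the rest is illustrative and only builds inside the QEMU tree:

    /* Hypothetical driver AIOCB; 'common' is the standard BlockAIOCB. */
    typedef struct MyAIOCB {
        BlockAIOCB common;
        int ret;
    } MyAIOCB;

    static void my_complete_bh(void *opaque)
    {
        MyAIOCB *acb = opaque;
        AioContext *ctx = bdrv_get_aio_context(acb->common.bs);

        /* The user callback may touch driver and device state, so it
         * must run under the AioContext lock of its BlockDriverState. */
        aio_context_acquire(ctx);
        acb->common.cb(acb->common.opaque, acb->ret);
        aio_context_release(ctx);

        /* AIOCB reference counting is safe without the lock. */
        qemu_aio_unref(acb);
    }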
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -310,8 +310,11 @@ static void qemu_archipelago_complete_aio(void *opaque)
 {
     AIORequestData *reqdata = (AIORequestData *) opaque;
     ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
+    AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
 
+    aio_context_acquire(ctx);
     aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
+    aio_context_release(ctx);
     aio_cb->status = 0;
 
     qemu_aio_unref(aio_cb);
--- a/block/blkreplay.c
+++ b/block/blkreplay.c
@@ -60,7 +60,7 @@ static int64_t blkreplay_getlength(BlockDriverState *bs)
 static void blkreplay_bh_cb(void *opaque)
 {
     Request *req = opaque;
-    qemu_coroutine_enter(req->co);
+    aio_co_wake(req->co);
     qemu_bh_delete(req->bh);
     g_free(req);
 }
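
Where a bottom half's only job is waking a coroutine, as in blkreplay above, the patch swaps qemu_coroutine_enter() for aio_co_wake() rather than open-coding the lock. Roughly, aio_co_wake() looks up the coroutine's home AioContext and either re-enters the coroutine under that context's lock or, when called from a foreign thread, defers it with aio_co_schedule(). A simplified model of that behavior (not the actual implementation; re-entrancy and memory-ordering details omitted):

    /* Simplified model of what aio_co_wake(co) does. */
    static void aio_co_wake_sketch(Coroutine *co)
    {
        AioContext *ctx = qemu_coroutine_get_aio_context(co);

        if (ctx != qemu_get_current_aio_context()) {
            /* Wrong thread: queue the coroutine on its home context. */
            aio_co_schedule(ctx, co);
        } else {
            /* Right thread: enter it now, holding the context lock. */
            aio_context_acquire(ctx);
            qemu_coroutine_enter(co);
            aio_context_release(ctx);
        }
    }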
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -939,9 +939,12 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
 static void error_callback_bh(void *opaque)
 {
     struct BlockBackendAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     bdrv_dec_in_flight(acb->common.bs);
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
@@ -983,9 +986,12 @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
 static void blk_aio_complete_bh(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     assert(acb->has_returned);
+    aio_context_acquire(ctx);
     blk_aio_complete(acb);
+    aio_context_release(ctx);
 }
 
 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
--- a/block/curl.c
+++ b/block/curl.c
@@ -796,13 +796,18 @@ static void curl_readv_bh_cb(void *p)
 {
     CURLState *state;
     int running;
+    int ret = -EINPROGRESS;
 
     CURLAIOCB *acb = p;
-    BDRVCURLState *s = acb->common.bs->opaque;
+    BlockDriverState *bs = acb->common.bs;
+    BDRVCURLState *s = bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(bs);
 
     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;
 
+    aio_context_acquire(ctx);
+
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
     switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) {
@@ -810,7 +815,7 @@ static void curl_readv_bh_cb(void *p)
         qemu_aio_unref(acb);
         // fall through
     case FIND_RET_WAIT:
-        return;
+        goto out;
     default:
         break;
     }
@@ -818,9 +823,8 @@ static void curl_readv_bh_cb(void *p)
     // No cache found, so let's start a new request
     state = curl_init_state(acb->common.bs, s);
     if (!state) {
-        acb->common.cb(acb->common.opaque, -EIO);
-        qemu_aio_unref(acb);
-        return;
+        ret = -EIO;
+        goto out;
     }
 
     acb->start = 0;
@@ -834,9 +838,8 @@ static void curl_readv_bh_cb(void *p)
     state->orig_buf = g_try_malloc(state->buf_len);
     if (state->buf_len && state->orig_buf == NULL) {
         curl_clean_state(state);
-        acb->common.cb(acb->common.opaque, -ENOMEM);
-        qemu_aio_unref(acb);
-        return;
+        ret = -ENOMEM;
+        goto out;
     }
     state->acb[0] = acb;
@@ -849,6 +852,13 @@ static void curl_readv_bh_cb(void *p)
     /* Tell curl it needs to kick things off */
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
+out:
+    if (ret != -EINPROGRESS) {
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unref(acb);
+    }
+    aio_context_release(ctx);
 }
 
 static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -698,13 +698,6 @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
     return qemu_gluster_glfs_init(gconf, errp);
 }
 
-static void qemu_gluster_complete_aio(void *opaque)
-{
-    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
-
-    qemu_coroutine_enter(acb->coroutine);
-}
-
 /*
  * AIO callback routine called from GlusterFS thread.
  */
@@ -720,7 +713,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = -EIO; /* Partial read/write - fail it */
     }
 
-    aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
+    aio_co_schedule(acb->aio_context, acb->coroutine);
 }
 
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
--- a/block/io.c
+++ b/block/io.c
@@ -189,7 +189,7 @@ static void bdrv_co_drain_bh_cb(void *opaque)
     bdrv_dec_in_flight(bs);
     bdrv_drained_begin(bs);
     data->done = true;
-    qemu_coroutine_enter(co);
+    aio_co_wake(co);
 }
 
 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
@@ -2152,9 +2152,13 @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
 static void bdrv_co_em_bh(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
+    AioContext *ctx = bdrv_get_aio_context(bs);
 
     assert(!acb->need_bh);
+    aio_context_acquire(ctx);
     bdrv_co_complete(acb);
+    aio_context_release(ctx);
 }
 
 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -136,13 +136,16 @@ static void
 iscsi_bh_cb(void *p)
 {
     IscsiAIOCB *acb = p;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
 
     g_free(acb->buf);
     acb->buf = NULL;
 
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->status);
+    aio_context_release(ctx);
 
     if (acb->task != NULL) {
         scsi_free_scsi_task(acb->task);
@@ -165,8 +168,9 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
 static void iscsi_co_generic_bh_cb(void *opaque)
 {
     struct IscsiTask *iTask = opaque;
+
     iTask->complete = 1;
-    qemu_coroutine_enter(iTask->co);
+    aio_co_wake(iTask->co);
 }
 
 static void iscsi_retry_timer_expired(void *opaque)
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -54,10 +54,10 @@ struct LinuxAioState {
     io_context_t ctx;
     EventNotifier e;
 
-    /* io queue for submit at batch */
+    /* io queue for submit at batch.  Protected by AioContext lock. */
     LaioQueue io_q;
 
-    /* I/O completion processing */
+    /* I/O completion processing.  Only runs in I/O thread. */
     QEMUBH *completion_bh;
     int event_idx;
     int event_max;
@@ -75,6 +75,7 @@ static inline ssize_t io_event_ret(struct io_event *ev)
  */
 static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
 {
+    LinuxAioState *s = laiocb->ctx;
     int ret;
 
     ret = laiocb->ret;
@@ -93,6 +94,7 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
     }
 
     laiocb->ret = ret;
+    aio_context_acquire(s->aio_context);
     if (laiocb->co) {
         /* If the coroutine is already entered it must be in ioq_submit() and
          * will notice laio->ret has been filled in when it eventually runs
@@ -106,6 +108,7 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
         laiocb->common.cb(laiocb->common.opaque, ret);
         qemu_aio_unref(laiocb);
     }
+    aio_context_release(s->aio_context);
 }
 
 /**
@@ -234,9 +237,12 @@ static void qemu_laio_process_completions(LinuxAioState *s)
 static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
 {
     qemu_laio_process_completions(s);
+
+    aio_context_acquire(s->aio_context);
     if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
         ioq_submit(s);
     }
+    aio_context_release(s->aio_context);
 }
 
 static void qemu_laio_completion_bh(void *opaque)
@@ -251,9 +257,7 @@ static void qemu_laio_completion_cb(EventNotifier *e)
     LinuxAioState *s = container_of(e, LinuxAioState, e);
 
     if (event_notifier_test_and_clear(&s->e)) {
-        aio_context_acquire(s->aio_context);
         qemu_laio_process_completions_and_submit(s);
-        aio_context_release(s->aio_context);
     }
 }
 
@@ -267,9 +271,7 @@ static bool qemu_laio_poll_cb(void *opaque)
         return false;
     }
 
-    aio_context_acquire(s->aio_context);
     qemu_laio_process_completions_and_submit(s);
-    aio_context_release(s->aio_context);
     return true;
 }
 
@@ -459,6 +461,7 @@ void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
 {
     aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
     qemu_bh_delete(s->completion_bh);
+    s->aio_context = NULL;
 }
 
 void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -237,8 +237,9 @@ static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
 static void nfs_co_generic_bh_cb(void *opaque)
 {
     NFSRPC *task = opaque;
+
     task->complete = 1;
-    qemu_coroutine_enter(task->co);
+    aio_co_wake(task->co);
 }
 
 static void
--- a/block/null.c
+++ b/block/null.c
@@ -134,7 +134,11 @@ static const AIOCBInfo null_aiocb_info = {
 static void null_bh_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
--- a/block/qed.c
+++ b/block/qed.c
@@ -942,6 +942,7 @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
 static void qed_aio_complete_bh(void *opaque)
 {
     QEDAIOCB *acb = opaque;
+    BDRVQEDState *s = acb_to_s(acb);
     BlockCompletionFunc *cb = acb->common.cb;
     void *user_opaque = acb->common.opaque;
     int ret = acb->bh_ret;
@@ -949,7 +950,9 @@ static void qed_aio_complete_bh(void *opaque)
     qemu_aio_unref(acb);
 
     /* Invoke callback */
+    qed_acquire(s);
     cb(user_opaque, ret);
+    qed_release(s);
 }
 
 static void qed_aio_complete(QEDAIOCB *acb, int ret)
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -413,6 +413,7 @@ shutdown:
 static void qemu_rbd_complete_aio(RADOSCB *rcb)
 {
     RBDAIOCB *acb = rcb->acb;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     int64_t r;
 
     r = rcb->ret;
@@ -445,7 +446,10 @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -166,8 +166,10 @@ static void dma_blk_cb(void *opaque, int ret)
                         QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
     }
 
+    aio_context_acquire(dbs->ctx);
     dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                             dma_blk_cb, dbs, dbs->io_func_opaque);
+    aio_context_release(dbs->ctx);
     assert(dbs->acb);
 }
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -647,6 +647,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
     s->rq = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     while (req) {
         VirtIOBlockReq *next = req->next;
         if (virtio_blk_handle_request(req, &mrb)) {
@@ -667,6 +668,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s->blk, &mrb);
     }
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_dma_restart_cb(void *opaque, int running,
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -105,6 +105,7 @@ static void scsi_dma_restart_bh(void *opaque)
     qemu_bh_delete(s->bh);
     s->bh = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
     QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
         scsi_req_ref(req);
         if (req->retry) {
@@ -122,6 +123,7 @@ static void scsi_dma_restart_bh(void *opaque)
         }
         scsi_req_unref(req);
     }
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 void scsi_req_retry(SCSIRequest *req)
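
The two hw/ hunks above show the same discipline from the device side: a main-loop bottom half that resubmits requests must hold the AioContext of the BlockBackend it drives, since the disk may be serviced by an IOThread. A hedged sketch of that shape (MyDev and resubmit_pending() are illustrative; blk_get_aio_context() and the acquire/release pair are the calls the patch adds):

    /* Hypothetical device restart BH mirroring virtio-blk/scsi-bus. */
    static void my_dma_restart_bh(void *opaque)
    {
        MyDev *dev = opaque;
        AioContext *ctx = blk_get_aio_context(dev->blk);

        aio_context_acquire(ctx);
        resubmit_pending(dev);      /* safe: the context lock is held */
        aio_context_release(ctx);
    }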
--- a/util/async.c
+++ b/util/async.c
@@ -114,9 +114,7 @@ int aio_bh_poll(AioContext *ctx)
                 ret = 1;
             }
             bh->idle = 0;
-            aio_context_acquire(ctx);
             aio_bh_call(bh);
-            aio_context_release(ctx);
         }
         if (bh->deleted) {
             deleted = true;
@@ -389,7 +387,9 @@ static void co_schedule_bh_cb(void *opaque)
         Coroutine *co = QSLIST_FIRST(&straight);
         QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
         trace_aio_co_schedule_bh_cb(ctx, co);
+        aio_context_acquire(ctx);
         qemu_coroutine_enter(co);
+        aio_context_release(ctx);
     }
 }
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -165,6 +165,7 @@ static void thread_pool_completion_bh(void *opaque)
     ThreadPool *pool = opaque;
    ThreadPoolElement *elem, *next;
 
+    aio_context_acquire(pool->ctx);
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_DONE) {
@@ -191,6 +192,7 @@ restart:
             qemu_aio_unref(elem);
         }
     }
+    aio_context_release(pool->ctx);
 }
 
 static void thread_pool_cancel(BlockAIOCB *acb)