Commit 6181478f authored by Peter Maydell

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Mon 27 Feb 2017 16:33:23 GMT
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  tests-aio-multithread: use atomic_read properly
  iscsi: do not use aio_context_acquire/release
  nfs: do not use aio_context_acquire/release
  curl: do not use aio_context_acquire/release

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
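
The curl, nfs and iscsi changes in this pull all follow one pattern: each driver's library state is protected by its own QemuMutex instead of aio_context_acquire/release, and the mutex is dropped around completion callbacks (and around qemu_coroutine_yield() in the coroutine paths) so a callback that submits a new request cannot deadlock on the lock already held. A minimal sketch of that pattern, with hypothetical FooState/foo_* names standing in for BDRVCURLState, NFSClient and IscsiLun (not code from this series):

#include "qemu/osdep.h"
#include "qemu/thread.h"

/* Hypothetical driver state used only for illustration; the real patches
 * add the mutex field to BDRVCURLState, IscsiLun and NFSClient. */
typedef struct FooState {
    QemuMutex mutex;   /* protects the library context and request list */
    /* ... libcurl/libnfs/libiscsi handles, in-flight requests ... */
} FooState;

/* File-descriptor handler registered with the AioContext: serialize on
 * the driver mutex rather than acquiring the AioContext itself. */
static void foo_process_read(void *arg)
{
    FooState *s = arg;

    qemu_mutex_lock(&s->mutex);
    /* drive the library's state machine; completions may fire here */
    qemu_mutex_unlock(&s->mutex);
}

/* Completion path, called with s->mutex held: the guest-visible callback
 * may issue new requests that take s->mutex again, so drop it first. */
static void foo_complete(FooState *s,
                         void (*cb)(void *opaque, int ret),
                         void *opaque, int ret)
{
    qemu_mutex_unlock(&s->mutex);
    cb(opaque, ret);
    qemu_mutex_lock(&s->mutex);
}

The coroutine-based iscsi paths apply the same rule around qemu_coroutine_yield(): release the mutex before yielding and re-take it afterwards, as the hunks below show.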
@@ -135,6 +135,7 @@ typedef struct BDRVCURLState {
     char *cookie;
     bool accept_range;
     AioContext *aio_context;
+    QemuMutex mutex;
     char *username;
     char *password;
     char *proxyusername;
@@ -333,6 +334,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     return FIND_RET_NONE;
 }
 
+/* Called with s->mutex held. */
 static void curl_multi_check_completion(BDRVCURLState *s)
 {
     int msgs_in_queue;
@@ -374,7 +376,9 @@ static void curl_multi_check_completion(BDRVCURLState *s)
                 continue;
             }
 
+            qemu_mutex_unlock(&s->mutex);
             acb->common.cb(acb->common.opaque, -EPROTO);
+            qemu_mutex_lock(&s->mutex);
             qemu_aio_unref(acb);
             state->acb[i] = NULL;
         }
@@ -386,6 +390,7 @@ static void curl_multi_check_completion(BDRVCURLState *s)
     }
 }
 
+/* Called with s->mutex held. */
 static void curl_multi_do_locked(CURLState *s)
 {
     CURLSocket *socket, *next_socket;
@@ -409,19 +414,19 @@ static void curl_multi_do(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_read(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
     curl_multi_check_completion(s->s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_timeout_do(void *arg)
@@ -434,11 +439,11 @@ static void curl_multi_timeout_do(void *arg)
         return;
     }
 
-    aio_context_acquire(s->aio_context);
+    qemu_mutex_lock(&s->mutex);
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
     curl_multi_check_completion(s);
-    aio_context_release(s->aio_context);
+    qemu_mutex_unlock(&s->mutex);
 #else
     abort();
 #endif
@@ -771,6 +776,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     curl_easy_cleanup(state->curl);
     state->curl = NULL;
 
+    qemu_mutex_init(&s->mutex);
     curl_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     qemu_opts_del(opts);
@@ -801,12 +807,11 @@ static void curl_readv_bh_cb(void *p)
     CURLAIOCB *acb = p;
     BlockDriverState *bs = acb->common.bs;
     BDRVCURLState *s = bs->opaque;
-    AioContext *ctx = bdrv_get_aio_context(bs);
 
     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;
 
-    aio_context_acquire(ctx);
+    qemu_mutex_lock(&s->mutex);
 
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
@@ -854,7 +859,7 @@ static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
 out:
-    aio_context_release(ctx);
+    qemu_mutex_unlock(&s->mutex);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
@@ -883,6 +888,7 @@ static void curl_close(BlockDriverState *bs)
     DPRINTF("CURL: Close\n");
     curl_detach_aio_context(bs);
 
+    qemu_mutex_destroy(&s->mutex);
     g_free(s->cookie);
     g_free(s->url);
...
@@ -58,6 +58,7 @@ typedef struct IscsiLun {
     int events;
     QEMUTimer *nop_timer;
     QEMUTimer *event_timer;
+    QemuMutex mutex;
     struct scsi_inquiry_logical_block_provisioning lbp;
     struct scsi_inquiry_block_limits bl;
     unsigned char *zeroblock;
@@ -252,6 +253,7 @@ static int iscsi_translate_sense(struct scsi_sense *sense)
     return ret;
 }
 
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                     void *command_data, void *opaque)
@@ -352,6 +354,7 @@ static const AIOCBInfo iscsi_aiocb_info = {
 static void iscsi_process_read(void *arg);
 static void iscsi_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void
 iscsi_set_events(IscsiLun *iscsilun)
 {
@@ -395,10 +398,10 @@ iscsi_process_read(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLIN);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void
@@ -407,10 +410,10 @@ iscsi_process_write(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_service(iscsi, POLLOUT);
     iscsi_set_events(iscsilun);
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
@@ -589,6 +592,7 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     uint64_t lba;
     uint32_t num_sectors;
     bool fua = flags & BDRV_REQ_FUA;
+    int r = 0;
 
     if (fua) {
         assert(iscsilun->dpofua);
@@ -604,6 +608,7 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     lba = sector_qemu2lun(sector_num, iscsilun);
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -640,7 +645,9 @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -655,12 +662,15 @@ retry:
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
@@ -693,18 +703,21 @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
         goto out;
     }
 
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                   sector_qemu2lun(sector_num, iscsilun),
                                   8 + 16, iscsi_co_generic_cb,
                                   &iTask) == NULL) {
         ret = -ENOMEM;
-        goto out;
+        goto out_unlock;
     }
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.do_retry) {
@@ -721,20 +734,20 @@ retry:
          * because the device is busy or the cmd is not
          * supported) we pretend all blocks are allocated
          * for backwards compatibility */
-        goto out;
+        goto out_unlock;
     }
 
     lbas = scsi_datain_unmarshall(iTask.task);
     if (lbas == NULL) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }
 
     lbasd = &lbas->descriptors[0];
 
     if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
         ret = -EIO;
-        goto out;
+        goto out_unlock;
     }
 
     *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);
@@ -756,6 +769,8 @@ retry:
     if (*pnum > nb_sectors) {
         *pnum = nb_sectors;
     }
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
 out:
     if (iTask.task != NULL) {
         scsi_free_scsi_task(iTask.task);
@@ -818,6 +833,7 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
     num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsilun->use_16_for_rw) {
 #if LIBISCSI_API_VERSION >= (20160603)
@@ -855,7 +871,9 @@ retry:
 #endif
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -867,6 +885,7 @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -881,6 +900,7 @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
     struct IscsiTask iTask;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0,
                                       0, iscsi_co_generic_cb, &iTask) == NULL) {
@@ -889,7 +909,9 @@ retry:
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -901,6 +923,7 @@ retry:
         iTask.complete = 0;
         goto retry;
     }
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     if (iTask.status != SCSI_STATUS_GOOD) {
         return iTask.err_code;
@@ -910,6 +933,7 @@ retry:
 }
 
 #ifdef __linux__
+/* Called (via iscsi_service) with QemuMutex held. */
 static void
 iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
                         void *command_data, void *opaque)
@@ -1034,6 +1058,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     acb->task->expxferlen = acb->ioh->dxfer_len;
 
     data.size = 0;
+    qemu_mutex_lock(&iscsilun->mutex);
     if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
         if (acb->ioh->iovec_count == 0) {
             data.data = acb->ioh->dxferp;
@@ -1049,6 +1074,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
                                  iscsi_aio_ioctl_cb,
                                  (data.size > 0) ? &data : NULL,
                                  acb) != 0) {
+        qemu_mutex_unlock(&iscsilun->mutex);
         scsi_free_scsi_task(acb->task);
         qemu_aio_unref(acb);
         return NULL;
@@ -1068,6 +1094,7 @@ static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     }
 
     iscsi_set_events(iscsilun);
+    qemu_mutex_unlock(&iscsilun->mutex);
 
     return &acb->common;
 }
@@ -1092,6 +1119,7 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     IscsiLun *iscsilun = bs->opaque;
     struct IscsiTask iTask;
     struct unmap_list list;
+    int r = 0;
 
     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -1106,15 +1134,19 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     list.num = count / iscsilun->block_size;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
+    qemu_mutex_lock(&iscsilun->mutex);
 retry:
     if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                          iscsi_co_generic_cb, &iTask) == NULL) {
-        return -ENOMEM;
+        r = -ENOMEM;
+        goto out_unlock;
     }
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.task != NULL) {
@@ -1131,17 +1163,20 @@ retry:
         /* the target might fail with a check condition if it
            is not happy with the alignment of the UNMAP request
           we silently fail in this case */
-        return 0;
+        goto out_unlock;
     }
 
     if (iTask.status != SCSI_STATUS_GOOD) {
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                count >> BDRV_SECTOR_BITS);
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 static int
@@ -1153,6 +1188,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
     uint64_t lba;
     uint32_t nb_blocks;
     bool use_16_for_ws = iscsilun->use_16_for_rw;
+    int r = 0;
 
     if (!is_byte_request_lun_aligned(offset, count, iscsilun)) {
         return -ENOTSUP;
@@ -1186,6 +1222,7 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
         }
     }
 
+    qemu_mutex_lock(&iscsilun->mutex);
     iscsi_co_init_iscsitask(iscsilun, &iTask);
 retry:
     if (use_16_for_ws) {
@@ -1205,7 +1242,9 @@ retry:
 
     while (!iTask.complete) {
         iscsi_set_events(iscsilun);
+        qemu_mutex_unlock(&iscsilun->mutex);
         qemu_coroutine_yield();
+        qemu_mutex_lock(&iscsilun->mutex);
     }
 
     if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
@@ -1215,7 +1254,8 @@ retry:
         /* WRITE SAME is not supported by the target */
         iscsilun->has_write_same = false;
         scsi_free_scsi_task(iTask.task);
-        return -ENOTSUP;
+        r = -ENOTSUP;
+        goto out_unlock;
     }
 
     if (iTask.task != NULL) {
@@ -1231,7 +1271,8 @@ retry:
     if (iTask.status != SCSI_STATUS_GOOD) {
         iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
                                    count >> BDRV_SECTOR_BITS);
-        return iTask.err_code;
+        r = iTask.err_code;
+        goto out_unlock;
     }
 
     if (flags & BDRV_REQ_MAY_UNMAP) {
@@ -1242,7 +1283,9 @@ retry:
                                       count >> BDRV_SECTOR_BITS);
     }
 
-    return 0;
+out_unlock:
+    qemu_mutex_unlock(&iscsilun->mutex);
+    return r;
 }
 
 static void apply_chap(struct iscsi_context *iscsi, QemuOpts *opts,
@@ -1331,7 +1374,7 @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    aio_context_acquire(iscsilun->aio_context);
+    qemu_mutex_lock(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
@@ -1344,7 +1387,7 @@ static void iscsi_nop_timed_event(void *opaque)
     iscsi_set_events(iscsilun);
 
 out:
-    aio_context_release(iscsilun->aio_context);
+    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
@@ -1890,6 +1933,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
     scsi_free_scsi_task(task);
     task = NULL;
 
+    qemu_mutex_init(&iscsilun->mutex);
     iscsi_attach_aio_context(bs, iscsilun->aio_context);
 
     /* Guess the internal cluster (page) size of the iscsi target by the means
@@ -1935,6 +1979,7 @@ static void iscsi_close(BlockDriverState *bs)
     iscsi_destroy_context(iscsi);
     g_free(iscsilun->zeroblock);
     iscsi_allocmap_free(iscsilun);
+    qemu_mutex_destroy(&iscsilun->mutex);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
...
@@ -54,6 +54,7 @@ typedef struct NFSClient {
     int events;
     bool has_zero_init;
     AioContext *aio_context;
+    QemuMutex mutex;
     blkcnt_t st_blocks;
     bool cache_used;
     NFSServer *server;
@@ -191,6 +192,7 @@ static void nfs_parse_filename(const char *filename, QDict *options,
 static void nfs_process_read(void *arg);
 static void nfs_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
@@ -209,20 +211,20 @@ static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
@@ -242,6 +244,7 @@ static void nfs_co_generic_bh_cb(void *opaque)
     aio_co_wake(task->co);
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                   void *private_data)
@@ -273,12 +276,15 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pread_async(client->context, client->fh,
                         offset, bytes, nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -317,9 +323,11 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pwrite_async(client->context, client->fh,
                          offset, bytes, buf,
                          nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         if (my_buffer) {
             g_free(buf);
         }
@@ -327,6 +335,7 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -349,12 +358,15 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
     nfs_co_init_task(bs, &task);
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -440,6 +452,7 @@ static void nfs_file_close(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
     nfs_client_close(client);
+    qemu_mutex_destroy(&client->mutex);
 }
 
 static NFSServer *nfs_config(QDict *options, Error **errp)
@@ -647,6 +660,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
     if (ret < 0) {
         return ret;
     }
+    qemu_mutex_init(&client->mutex);
     bs->total_sectors = ret;
     ret = 0;
     return ret;
@@ -702,6 +716,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                                void *private_data)
...