Commit 48af776a authored by Kevin Wolf

block: Use blk_co_ioctl() for all BB level ioctls

All read/write functions already have a single coroutine-based function
on the BlockBackend level through which all requests go (no matter what
API style the external caller used) and which passes the requests down
to the block node level.

This patch exports a bdrv_co_ioctl() function and uses it to extend this
mode of operation to ioctls.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Parent 7381e95c
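The commit message above describes the pattern being extended here: every external API style (synchronous, AIO, coroutine) funnels through a single coroutine-based implementation per request type, with a thin wrapper that packs the arguments into a per-request struct and runs a coroutine entry function. Below is a minimal, self-contained sketch of that shape, added by the editor for illustration; it is not QEMU code. The names mirror the diff (BlkRwCo, blk_co_ioctl, blk_ioctl_entry, blk_ioctl), but the BlockBackend is replaced by a plain file descriptor, and the "coroutine" entry is simply called directly rather than spawned and polled the way blk_prw() does. Assumes Linux for FIONREAD.

/* Editor's sketch of the "one coroutine function per request type" pattern.
 * NOT QEMU code: the real blk_prw() runs the entry in a qemu_coroutine and
 * waits for completion; here the entry is just a direct call so that the
 * example stays self-contained and runnable. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <errno.h>

typedef struct BlkRwCo {        /* simplified stand-in for QEMU's BlkRwCo  */
    int fd;                     /* stands in for the BlockBackend          */
    unsigned long req;          /* ioctl request number (rides in 'offset')*/
    void *buf;                  /* ioctl payload                           */
    int ret;
} BlkRwCo;

/* The single "coroutine" implementation that all API styles go through. */
static int blk_co_ioctl_sketch(int fd, unsigned long req, void *buf)
{
    int ret = ioctl(fd, req, buf);
    return ret < 0 ? -errno : ret;
}

/* Entry function: unpack the request struct, run it, store the result. */
static void blk_ioctl_entry_sketch(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_ioctl_sketch(rwco->fd, rwco->req, rwco->buf);
}

/* Synchronous wrapper: pack arguments, run the entry, return the result.
 * In QEMU this is blk_prw(), which also polls until the coroutine is done. */
static int blk_ioctl_sketch(int fd, unsigned long req, void *buf)
{
    BlkRwCo rwco = { .fd = fd, .req = req, .buf = buf };
    blk_ioctl_entry_sketch(&rwco);
    return rwco.ret;
}

int main(void)
{
    int pending = 0;
    /* FIONREAD on stdin as a harmless demonstration request. */
    int ret = blk_ioctl_sketch(0, FIONREAD, &pending);
    printf("ret=%d, bytes pending on stdin=%d\n", ret, pending);
    return 0;
}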
@@ -1141,23 +1141,50 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
     bdrv_aio_cancel_async(acb);
 }
 
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
     if (!blk_is_available(blk)) {
         return -ENOMEDIUM;
     }
 
-    return bdrv_ioctl(blk_bs(blk), req, buf);
+    return bdrv_co_ioctl(blk_bs(blk), req, buf);
+}
+
+static void blk_ioctl_entry(void *opaque)
+{
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+                             rwco->qiov->iov[0].iov_base);
+}
+
+int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+{
+    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
+}
+
+static void blk_aio_ioctl_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
+
+    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+                             rwco->qiov->iov[0].iov_base);
+    blk_aio_complete(acb);
+}
+
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
 {
-    if (!blk_is_available(blk)) {
-        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
-    }
+    QEMUIOVector qiov;
+    struct iovec iov;
+
+    iov = (struct iovec) {
+        .iov_base = buf,
+        .iov_len = 0,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
 
-    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
+    return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
 }
 
 int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
......
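One detail of the hunk above worth calling out: the generic blk_prw()/blk_aio_prwv() plumbing only carries an offset and a QEMUIOVector, so the ioctl request number travels in the offset parameter and the user buffer pointer in a single-element iovec whose iov_len is 0 (an ioctl has no byte count). The entry functions then read the pointer back through rwco->qiov->iov[0].iov_base. The tiny sketch below, added by the editor with plain POSIX iovec rather than QEMU's QEMUIOVector, shows that round trip.

/* Editor's sketch: pack an ioctl buffer pointer into a zero-length iovec
 * and recover it again, mirroring what blk_aio_ioctl() and blk_ioctl_entry()
 * do with QEMUIOVector in the hunk above.  Plain POSIX types only. */
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
    char payload[64];                 /* stands in for the ioctl buffer   */

    struct iovec iov = {
        .iov_base = payload,          /* only the pointer matters         */
        .iov_len  = 0,                /* an ioctl carries no byte count   */
    };
    struct iovec qiov[1] = { iov };   /* qemu_iovec_init_external() analogue */

    /* What the coroutine entry later reads back as its 'buf' argument: */
    void *buf = qiov[0].iov_base;
    printf("recovered buf %p (iov_len=%zu)\n", buf, qiov[0].iov_len);
    return 0;
}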
@@ -2492,7 +2492,7 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     return rwco.ret;
 }
 
-static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
+int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
 {
     BlockDriver *drv = bs->drv;
     BdrvTrackedRequest tracked_req;
@@ -2528,7 +2528,7 @@ typedef struct {
 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
 {
     BdrvIoctlCoData *data = opaque;
-    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
+    data->ret = bdrv_co_ioctl(data->bs, data->req, data->buf);
 }
 
 /* needed for generic scsi interface */
@@ -2558,8 +2558,8 @@ int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
-    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
-                                      acb->req.req, acb->req.buf);
+    acb->req.error = bdrv_co_ioctl(acb->common.bs,
+                                   acb->req.req, acb->req.buf);
     bdrv_co_complete(acb);
 }
......
@@ -318,6 +318,7 @@ void bdrv_aio_cancel(BlockAIOCB *acb);
 void bdrv_aio_cancel_async(BlockAIOCB *acb);
 
 /* sg packet commands */
+int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
         unsigned long int req, void *buf,
......
@@ -146,6 +146,7 @@ BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque);
 void blk_aio_cancel(BlockAIOCB *acb);
 void blk_aio_cancel_async(BlockAIOCB *acb);
+int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque);
......
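With the declarations added in the two header hunks above, code elsewhere in the QEMU tree can pick whichever variant matches its context: blk_co_ioctl() from coroutine context, blk_ioctl() synchronously, or blk_aio_ioctl() with a completion callback. The fragment below is a hypothetical caller sketched by the editor, not part of this commit: the my_sg_io_* names are invented, and it assumes QEMU's include paths plus the declarations introduced by this patch; SG_IO and sg_io_hdr_t come from <scsi/sg.h>.

/* Hypothetical caller (editor's sketch, not from this commit). */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include <scsi/sg.h>

/* Completion callback matching BlockCompletionFunc(void *opaque, int ret). */
static void my_sg_io_complete(void *opaque, int ret)
{
    sg_io_hdr_t *hdr = opaque;
    (void)hdr;          /* inspect hdr->status together with ret here */
    (void)ret;
}

/* Synchronous path: blk_ioctl() now funnels through blk_co_ioctl(). */
static int my_sg_io_sync(BlockBackend *blk, sg_io_hdr_t *hdr)
{
    return blk_ioctl(blk, SG_IO, hdr);
}

/* Asynchronous path: completion is reported through the callback. */
static void my_sg_io_async(BlockBackend *blk, sg_io_hdr_t *hdr)
{
    blk_aio_ioctl(blk, SG_IO, hdr, my_sg_io_complete, hdr);
}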