Commit bafbd6a1 authored by Paolo Bonzini, committed by Kevin Wolf

aio: remove process_queue callback and qemu_aio_process_queue

Both unused after the previous patch.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Parent adfe92f6
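In short, this patch drops the io_process_queue argument from qemu_aio_set_fd_handler() and deletes qemu_aio_process_queue(). A minimal before/after sketch of the public prototype, reconstructed from the qemu-aio.h hunk at the end of this diff:

/* Before this patch: */
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            AioProcessQueue *io_process_queue,
                            void *opaque);

/* After this patch (the AioProcessQueue callback and its typedef are gone): */
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque);

Every caller below changes mechanically: the literal NULL passed for io_process_queue (often on its own wrapped line) disappears from the argument list.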
--- a/aio.c
+++ b/aio.c
@@ -35,7 +35,6 @@ struct AioHandler
     IOHandler *io_read;
     IOHandler *io_write;
     AioFlushHandler *io_flush;
-    AioProcessQueue *io_process_queue;
     int deleted;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
@@ -58,7 +57,6 @@ int qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
-                            AioProcessQueue *io_process_queue,
                             void *opaque)
 {
     AioHandler *node;
@@ -91,7 +89,6 @@ int qemu_aio_set_fd_handler(int fd,
         node->io_read = io_read;
         node->io_write = io_write;
         node->io_flush = io_flush;
-        node->io_process_queue = io_process_queue;
         node->opaque = opaque;
     }
 
@@ -122,39 +119,17 @@ void qemu_aio_flush(void)
     } while (qemu_bh_poll() || ret > 0);
 }
 
-int qemu_aio_process_queue(void)
-{
-    AioHandler *node;
-    int ret = 0;
-
-    walking_handlers = 1;
-
-    QLIST_FOREACH(node, &aio_handlers, node) {
-        if (node->io_process_queue) {
-            if (node->io_process_queue(node->opaque)) {
-                ret = 1;
-            }
-        }
-    }
-
-    walking_handlers = 0;
-
-    return ret;
-}
-
 void qemu_aio_wait(void)
 {
     int ret;
 
-    if (qemu_bh_poll())
-        return;
-
     /*
      * If there are callbacks left that have been queued, we need to call them.
      * Return afterwards to avoid waiting needlessly in select().
      */
-    if (qemu_aio_process_queue())
+    if (qemu_bh_poll()) {
         return;
+    }
 
     do {
         AioHandler *node;
--- a/block/curl.c
+++ b/block/curl.c
@@ -89,19 +89,17 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush, s);
             break;
         case CURL_POLL_OUT:
-            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush, s);
             break;
         case CURL_POLL_INOUT:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
-                                    curl_aio_flush, NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
+                                    curl_aio_flush, s);
             break;
         case CURL_POLL_REMOVE:
-            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL, NULL);
+            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
             break;
     }
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -108,7 +108,7 @@ iscsi_set_events(IscsiLun *iscsilun)
     qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), iscsi_process_read,
                             (iscsi_which_events(iscsi) & POLLOUT)
                             ? iscsi_process_write : NULL,
-                            iscsi_process_flush, NULL, iscsilun);
+                            iscsi_process_flush, iscsilun);
 }
 
 static void
@@ -682,7 +682,7 @@ static void iscsi_close(BlockDriverState *bs)
     IscsiLun *iscsilun = bs->opaque;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL);
     iscsi_destroy_context(iscsi);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -191,7 +191,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
     qemu_co_mutex_lock(&s->send_mutex);
     s->send_coroutine = qemu_coroutine_self();
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
     rc = nbd_send_request(s->sock, request);
     if (rc != -1 && iov) {
         ret = qemu_co_sendv(s->sock, iov, request->len, offset);
@@ -201,7 +201,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
         }
     }
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
     s->send_coroutine = NULL;
     qemu_co_mutex_unlock(&s->send_mutex);
     return rc;
@@ -274,7 +274,7 @@ static int nbd_establish_connection(BlockDriverState *bs)
      * kick the reply mechanism.  */
     socket_set_nonblock(sock);
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
 
     s->sock = sock;
     s->size = size;
@@ -294,7 +294,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     request.len = 0;
     nbd_send_request(s->sock, &request);
 
-    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
     closesocket(s->sock);
 }
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -504,7 +504,7 @@ static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
     fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
     fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
     qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
-                            NULL, qemu_rbd_aio_flush_cb, NULL, s);
+                            NULL, qemu_rbd_aio_flush_cb, s);
 
     return 0;
@@ -525,8 +525,7 @@ static void qemu_rbd_close(BlockDriverState *bs)
     close(s->fds[0]);
     close(s->fds[1]);
-    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL,
-                            NULL);
+    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
 
     rbd_close(s->image);
     rados_ioctx_destroy(s->io_ctx);
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -799,8 +799,7 @@ static int get_sheep_fd(BDRVSheepdogState *s)
         return -1;
     }
 
-    qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request,
-                            NULL, s);
+    qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request, s);
     return fd;
 }
@@ -973,7 +972,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
     qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
-                            aio_flush_request, NULL, s);
+                            aio_flush_request, s);
     socket_set_cork(s->fd, 1);
 
     /* send a header */
@@ -995,7 +994,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     socket_set_cork(s->fd, 0);
     qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
-                            aio_flush_request, NULL, s);
+                            aio_flush_request, s);
     qemu_co_mutex_unlock(&s->lock);
 
     return 0;
@@ -1135,7 +1134,7 @@ static int sd_open(BlockDriverState *bs, const char *filename, int flags)
     g_free(buf);
     return 0;
 out:
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1349,7 +1348,7 @@ static void sd_close(BlockDriverState *bs)
         error_report("%s, %s", sd_strerror(rsp->result), s->name);
     }
 
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
     closesocket(s->fd);
     if (s->cache_enabled) {
         closesocket(s->flush_fd);
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -214,7 +214,7 @@ void *laio_init(void)
         goto out_close_efd;
 
     qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
-                            qemu_laio_flush_cb, NULL, s);
+                            qemu_laio_flush_cb, s);
 
     return s;
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -663,8 +663,7 @@ int paio_init(void)
     fcntl(s->rfd, F_SETFL, O_NONBLOCK);
     fcntl(s->wfd, F_SETFL, O_NONBLOCK);
 
-    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
-                            NULL, s);
+    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);
 
     ret = pthread_attr_init(&attr);
     if (ret)
--- a/qemu-aio.h
+++ b/qemu-aio.h
@@ -41,11 +41,6 @@ void qemu_aio_release(void *p);
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
 typedef int (AioFlushHandler)(void *opaque);
 
-/* Runs all currently allowed AIO callbacks of completed requests in the
- * respective AIO backend. Returns 0 if no requests was handled, non-zero
- * if at least one queued request was handled. */
-typedef int (AioProcessQueue)(void *opaque);
-
 /* Flush any pending AIO operation. This function will block until all
  * outstanding AIO operations have been completed or cancelled. */
 void qemu_aio_flush(void);
@@ -56,13 +51,6 @@ void qemu_aio_flush(void);
  * result of executing I/O completion or bh callbacks. */
 void qemu_aio_wait(void);
 
-/*
- * Runs all currently allowed AIO callbacks of completed requests. Returns 0
- * if no requests were handled, non-zero if at least one request was
- * processed.
- */
-int qemu_aio_process_queue(void);
-
 /* Register a file descriptor and associated callbacks. Behaves very similarly
  * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
@@ -74,7 +62,6 @@ int qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
-                            AioProcessQueue *io_process_queue,
                             void *opaque);
 
 #endif
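For illustration, a minimal sketch of how a block driver registers and removes an fd against the new five-argument signature; the MyDriverState type and the my_* handler names are hypothetical, not part of this patch:

#include "qemu-aio.h"

/* Hypothetical driver state, for illustration only. */
typedef struct MyDriverState {
    int fd;
} MyDriverState;

/* Invoked from qemu_aio_wait()/qemu_aio_flush() when the fd is readable. */
static void my_read_ready(void *opaque)
{
    MyDriverState *s = opaque;
    /* ... drain completions from s->fd ... */
    (void)s;
}

/* Per the AioFlushHandler comment above: return 1 while requests are
 * still outstanding so qemu_aio_flush() keeps waiting, 0 otherwise. */
static int my_flush_pending(void *opaque)
{
    return 0;
}

static void my_register(MyDriverState *s)
{
    /* Before this patch, a NULL io_process_queue argument sat between
     * io_flush and opaque; now it is simply dropped. */
    qemu_aio_set_fd_handler(s->fd, my_read_ready, NULL, my_flush_pending, s);
}

static void my_unregister(MyDriverState *s)
{
    /* All-NULL handlers remove the fd, as in the teardown call sites above. */
    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
}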