Commit e95205e1 authored by Fam Zheng, committed by Paolo Bonzini

dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel

If DMA's owning thread cancels the IO while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 33b6c2ed
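In short: before the patch, the callback passed to cpu_register_map_client() ran in the bounce buffer owner's thread and allocated dbs->bh only at notification time, so nothing ordered that allocation and the subsequent qemu_bh_schedule() against the qemu_bh_delete() issued from the DMA owner's cancel path; the diagram above shows the losing interleaving. Making the caller create the BH up front turns notification into a plain qemu_bh_schedule() of an object whose lifetime the caller controls.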
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
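The dma-helpers.c hunks address both problems from the commit message: dma_blk_cb() now creates the BH with aio_bh_new() in blk_get_aio_context(dbs->blk), so it runs in the block backend's AioContext instead of the main loop's, and dma_aio_cancel() unregisters the map client before deleting the BH it owns, so the notifier can never schedule a freed BH.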
......
--- a/exec.c
+++ b/exec.c
@@ -2479,8 +2479,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2488,31 +2487,34 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
+
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2523,12 +2525,18 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)
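Note that both sides of the race now run under map_client_list_lock: cpu_notify_map_clients_locked() merely schedules the client's BH and drops the list entry, while cpu_unregister_map_client() removes the entry only if it is still on the list. Either notification already happened, in which case the cancel path finds nothing and simply deletes its own (possibly pending) BH, or unregistration wins and the BH can never be scheduled again.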
......
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 bool cpu_physical_memory_is_io(hwaddr phys_addr);
......
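To make the new contract concrete, a caller of the reworked API is expected to pair the two calls as below. This is a sketch under the post-patch API; MyRequest, my_retry_cb, my_map_failure and my_cancel are illustrative names, not QEMU code.

#include "block/aio.h"        /* aio_bh_new(), qemu_bh_delete() */
#include "exec/cpu-common.h"  /* cpu_(un)register_map_client()  */

typedef struct MyRequest {
    AioContext *ctx;          /* the context the retry must run in      */
    QEMUBH *bh;               /* owned by the request, not the registry */
} MyRequest;

static void my_retry_cb(void *opaque)
{
    MyRequest *req = opaque;
    /* re-issue cpu_physical_memory_map() for req here */
}

static void my_map_failure(MyRequest *req)
{
    /* Create the BH up front in the request's own AioContext, so it
     * runs in the right context when the bounce buffer is released. */
    req->bh = aio_bh_new(req->ctx, my_retry_cb, req);
    cpu_register_map_client(req->bh);   /* the notifier only schedules it */
}

static void my_cancel(MyRequest *req)
{
    if (req->bh) {
        /* Unregister first, so the notifier can no longer see the BH,
         * then delete it: the same ordering dma_aio_cancel() uses above,
         * which closes the use-after-free window. */
        cpu_unregister_map_client(req->bh);
        qemu_bh_delete(req->bh);
        req->bh = NULL;
    }
}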