From a73c67b6cc4d2404b0c2d2af3cbd37f79b701f42 Mon Sep 17 00:00:00 2001
From: John Ferlan
Date: Tue, 8 Sep 2015 07:05:55 -0400
Subject: [PATCH] qemu: Resolve Coverity RESOURCE_LEAK

This seemed to be more of a false positive, as for some reason Coverity
was missing the "ret < 0" goto error condition and somehow believing
that event could be overwritten. At first I thought it was just the
ret != 0 condition difference, but it wasn't.

In any case, make use of the recent change to qemuDomainEventQueue to
check event == NULL and just pass it as a parameter directly in the
error path. That avoids the error.

Signed-off-by: John Ferlan
---
 src/qemu/qemu_driver.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 0aaea7b46d..7025c51b01 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3175,7 +3175,6 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     char *xml = NULL;
     bool was_running = false;
     int ret = -1;
-    int rc;
     virObjectEventPtr event = NULL;
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virCapsPtr caps;
@@ -3256,14 +3255,14 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     if (ret < 0) {
         if (was_running && virDomainObjIsActive(vm)) {
             virErrorPtr save_err = virSaveLastError();
-            rc = qemuProcessStartCPUs(driver, vm, dom->conn,
-                                      VIR_DOMAIN_RUNNING_SAVE_CANCELED,
-                                      QEMU_ASYNC_JOB_SAVE);
-            if (rc < 0) {
+            if (qemuProcessStartCPUs(driver, vm, dom->conn,
+                                     VIR_DOMAIN_RUNNING_SAVE_CANCELED,
+                                     QEMU_ASYNC_JOB_SAVE) < 0) {
                 VIR_WARN("Unable to resume guest CPUs after save failure");
-                event = virDomainEventLifecycleNewFromObj(vm,
-                                                          VIR_DOMAIN_EVENT_SUSPENDED,
-                                                          VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
+                qemuDomainEventQueue(driver,
+                                     virDomainEventLifecycleNewFromObj(vm,
+                                         VIR_DOMAIN_EVENT_SUSPENDED,
+                                         VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR));
             }
             virSetError(save_err);
             virFreeError(save_err);
--
GitLab
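
Editor's note: the "recent change" the commit message relies on is that
qemuDomainEventQueue tolerates event == NULL, which is what lets the error
path above pass the virDomainEventLifecycleNewFromObj() result straight
through without the temporary "event" variable. The following is a minimal
illustrative sketch of such a helper, not part of the patch itself; it
assumes the helper simply wraps virObjectEventStateQueue() around the
driver's domainEventState field.

/* Sketch only: a NULL-tolerant event queue helper.
 * If the event constructor failed and returned NULL, this is a no-op,
 * so callers may pass the constructor result as the argument directly. */
void
qemuDomainEventQueue(virQEMUDriverPtr driver,
                     virObjectEventPtr event)
{
    if (event)
        virObjectEventStateQueue(driver->domainEventState, event);
}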