From 496084175a78b02312129e0398ec14c5927d75ba Mon Sep 17 00:00:00 2001
From: Eric Blake
Date: Mon, 14 Mar 2011 20:20:53 -0600
Subject: [PATCH] qemu: respect locking rules

THREADS.txt states that the contents of vm should not be read or
modified while the vm lock is not held, but that the lock must not be
held while performing a monitor command.  This fixes all the offenders
that I could find.

* src/qemu/qemu_process.c (qemuProcessStartCPUs)
(qemuProcessInitPasswords, qemuProcessStart): Don't modify or refer
to vm state outside lock.
* src/qemu/qemu_driver.c (qemudDomainHotplugVcpus): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeGraphicsPasswords):
Likewise.
---
 src/qemu/qemu_driver.c  | 12 +++++++-----
 src/qemu/qemu_hotplug.c |  7 -------
 src/qemu/qemu_process.c | 18 +++++++++++-------
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 076177cde0..c7d42623e2 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2553,14 +2553,15 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
     int i, rc = 1;
     int ret = -1;
     int oldvcpus = vm->def->vcpus;
+    int vcpus = oldvcpus;
 
     qemuDomainObjEnterMonitor(vm);
 
     /* We need different branches here, because we want to offline
      * in reverse order to onlining, so any partial fail leaves us in a
      * reasonably sensible state */
-    if (nvcpus > vm->def->vcpus) {
-        for (i = vm->def->vcpus ; i < nvcpus ; i++) {
+    if (nvcpus > vcpus) {
+        for (i = vcpus ; i < nvcpus ; i++) {
             /* Online new CPU */
             rc = qemuMonitorSetCPU(priv->mon, i, 1);
             if (rc == 0)
@@ -2568,10 +2569,10 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
             if (rc < 0)
                 goto cleanup;
 
-            vm->def->vcpus++;
+            vcpus++;
         }
     } else {
-        for (i = vm->def->vcpus - 1 ; i >= nvcpus ; i--) {
+        for (i = vcpus - 1 ; i >= nvcpus ; i--) {
             /* Offline old CPU */
             rc = qemuMonitorSetCPU(priv->mon, i, 0);
             if (rc == 0)
@@ -2579,7 +2580,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
             if (rc < 0)
                 goto cleanup;
 
-            vm->def->vcpus--;
+            vcpus--;
         }
     }
 
@@ -2587,6 +2588,7 @@ static int qemudDomainHotplugVcpus(virDomainObjPtr vm, unsigned int nvcpus)
 
 cleanup:
     qemuDomainObjExitMonitor(vm);
+    vm->def->vcpus = vcpus;
     qemuAuditVcpu(vm, oldvcpus, nvcpus, "update", rc == 1);
 
     return ret;
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index e4ba526010..e1d9d29865 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -1836,13 +1836,6 @@ qemuDomainChangeGraphicsPasswords(struct qemud_driver *driver,
     if (ret != 0)
         goto cleanup;
 
-    if (!virDomainObjIsActive(vm)) {
-        ret = -1;
-        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                        _("guest unexpectedly quit"));
-        goto cleanup;
-    }
-
     if (auth->expires) {
         time_t lifetime = auth->validTo - now;
         if (lifetime <= 0)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 740684a6df..793a43c69c 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1008,8 +1008,8 @@ qemuProcessWaitForMonitor(struct qemud_driver* driver,
     if (paths == NULL)
         goto cleanup;
 
-    qemuDomainObjEnterMonitorWithDriver(driver, vm);
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    qemuDomainObjEnterMonitorWithDriver(driver, vm);
     ret = qemuMonitorGetPtyPaths(priv->mon, paths);
     qemuDomainObjExitMonitorWithDriver(driver, vm);
 
@@ -1175,6 +1175,7 @@ qemuProcessInitPasswords(virConnectPtr conn,
     for (i = 0 ; i < vm->def->ndisks ; i++) {
         char *secret;
         size_t secretLen;
+        const char *alias;
 
         if (!vm->def->disks[i]->encryption ||
             !vm->def->disks[i]->src)
@@ -1185,10 +1186,9 @@ qemuProcessInitPasswords(virConnectPtr conn,
                                                &secret, &secretLen) < 0)
             goto cleanup;
 
+        alias = vm->def->disks[i]->info.alias;
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
-        ret = qemuMonitorSetDrivePassphrase(priv->mon,
-                                            vm->def->disks[i]->info.alias,
-                                            secret);
+        ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret);
         VIR_FREE(secret);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
         if (ret < 0)
@@ -1727,17 +1727,19 @@ qemuProcessPrepareMonitorChr(struct qemud_driver *driver,
 }
 
 
-int qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm, virConnectPtr conn)
+int
+qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
+                     virConnectPtr conn)
 {
     int ret;
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
     qemuDomainObjEnterMonitorWithDriver(driver, vm);
     ret = qemuMonitorStartCPUs(priv->mon, conn);
+    qemuDomainObjExitMonitorWithDriver(driver, vm);
     if (ret == 0) {
         vm->state = VIR_DOMAIN_RUNNING;
     }
-    qemuDomainObjExitMonitorWithDriver(driver, vm);
 
     return ret;
 }
@@ -1901,6 +1903,7 @@ int qemuProcessStart(virConnectPtr conn,
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virCommandPtr cmd = NULL;
     struct qemuProcessHookData hookData;
+    unsigned long cur_balloon;
 
     hookData.conn = conn;
     hookData.vm = vm;
@@ -2210,8 +2213,9 @@ int qemuProcessStart(virConnectPtr conn,
     }
 
     VIR_DEBUG0("Setting initial memory amount");
+    cur_balloon = vm->def->mem.cur_balloon;
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
-    if (qemuMonitorSetBalloon(priv->mon, vm->def->mem.cur_balloon) < 0) {
+    if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) {
         qemuDomainObjExitMonitorWithDriver(driver, vm);
         goto cleanup;
     }
--
GitLab
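
To make the THREADS.txt rule concrete, here is a minimal standalone sketch
of the pattern this patch applies, written against plain pthreads rather
than libvirt's qemuDomainObjEnterMonitor()/qemuDomainObjExitMonitor()
helpers; the guest struct and the slow_monitor_call() and hotplug_vcpus()
names are hypothetical stand-ins, not libvirt APIs.  The idea is the one
visible in qemudDomainHotplugVcpus() above: snapshot shared state into
locals while the lock is held, drop the lock for the slow monitor
round-trips, and write results back only once the lock is held again.

#include <pthread.h>
#include <stdio.h>

struct guest {
    pthread_mutex_t lock;   /* plays the role of the vm object lock      */
    int vcpus;              /* shared state, only touched under the lock */
};

/* Stand-in for a monitor command; must not be called with the lock held. */
static int slow_monitor_call(int cpu, int online)
{
    (void)cpu;
    (void)online;
    /* imagine a blocking round-trip to the QEMU monitor here */
    return 0;
}

static int hotplug_vcpus(struct guest *g, int nvcpus)
{
    int rc = 0;

    pthread_mutex_lock(&g->lock);
    int vcpus = g->vcpus;              /* snapshot under the lock          */
    pthread_mutex_unlock(&g->lock);    /* "enter monitor": drop the lock   */

    for (int i = vcpus; i < nvcpus && rc == 0; i++) {
        rc = slow_monitor_call(i, 1);  /* lock is NOT held across the call */
        if (rc == 0)
            vcpus++;
    }

    pthread_mutex_lock(&g->lock);      /* "exit monitor": re-take the lock */
    g->vcpus = vcpus;                  /* publish the result under the lock */
    pthread_mutex_unlock(&g->lock);
    return rc;
}

int main(void)
{
    struct guest g = { PTHREAD_MUTEX_INITIALIZER, 2 };
    int rc = hotplug_vcpus(&g, 4);
    /* single-threaded here, so reading g.vcpus without the lock is fine */
    printf("rc=%d vcpus=%d\n", rc, g.vcpus);
    return rc == 0 ? 0 : 1;
}

The patched driver code follows the same ordering: copy vm->def->vcpus
into a local before qemuDomainObjEnterMonitor(), touch only the local
while issuing monitor commands, and assign it back to vm->def->vcpus
after qemuDomainObjExitMonitor() has re-acquired the vm lock.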