Commit aad362f9 authored by Jiri Denemark

qemu: Move qemuProcessReconnect to the end of qemu_process.c

qemuProcessReconnect will need to call additional functions that were
originally defined further down in qemu_process.c.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Pavel Hrdina <phrdina@redhat.com>
Parent ee4180be
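
The motivation for the move is the usual C ordering rule: a call site must see a declaration of its callee, so keeping qemuProcessReconnect near the top of qemu_process.c would force forward declarations for every static helper it starts to use. A minimal, self-contained sketch of that constraint follows; the names are hypothetical and are not libvirt code.

#include <stdio.h>

/* Forward declaration: needed only because reconnect() is defined
 * above the helper it calls. */
static int refresh_state(int id);

static void
reconnect(int id)
{
    /* ... reconnect work ... */
    refresh_state(id);          /* helper defined further down the file */
}

static int
refresh_state(int id)
{
    printf("refreshing state of domain %d\n", id);
    return 0;
}

int
main(void)
{
    reconnect(1);
    return 0;
}

Moving reconnect() below refresh_state() would let the forward declaration be dropped; this commit applies the same idea by moving qemuProcessReconnect past the helpers it will need.
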
@@ -3360,628 +3360,306 @@ qemuProcessBuildDestroyHugepagesPath(virQEMUDriverPtr driver,
}
struct qemuProcessReconnectData {
virConnectPtr conn;
virQEMUDriverPtr driver;
virDomainObjPtr obj;
};
/*
* Open an existing VM's monitor, re-detect VCPU threads
* and re-reserve the security labels in use
*
* We own the virConnectPtr we are passed here - whoever started
* this thread function has increased the reference counter to it
* so that we now have to close it.
*
* This function also inherits a locked and ref'd domain object.
*
* This function needs to:
* 1. Enter job
* 1. just before monitor reconnect do lightweight MonitorEnter
* (increase VM refcount and unlock VM)
* 2. reconnect to monitor
* 3. do lightweight MonitorExit (lock VM)
* 4. continue reconnect process
* 5. EndJob
*
* We can't do normal MonitorEnter & MonitorExit because these two lock the
* monitor lock, which does not exists in this early phase.
*/
static void
qemuProcessReconnect(void *opaque)
static int
qemuProcessVNCAllocatePorts(virQEMUDriverPtr driver,
virDomainGraphicsDefPtr graphics,
bool allocate)
{
struct qemuProcessReconnectData *data = opaque;
virQEMUDriverPtr driver = data->driver;
virDomainObjPtr obj = data->obj;
qemuDomainObjPrivatePtr priv;
virConnectPtr conn = data->conn;
struct qemuDomainJobObj oldjob;
int state;
int reason;
virQEMUDriverConfigPtr cfg;
size_t i;
unsigned int stopFlags = 0;
bool jobStarted = false;
virCapsPtr caps = NULL;
VIR_FREE(data);
qemuDomainObjRestoreJob(obj, &oldjob);
if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
unsigned short port;
cfg = virQEMUDriverGetConfig(driver);
priv = obj->privateData;
if (!allocate) {
if (graphics->data.vnc.autoport)
graphics->data.vnc.port = 5900;
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto error;
return 0;
}
if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
goto error;
jobStarted = true;
if (graphics->data.vnc.autoport) {
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
return -1;
graphics->data.vnc.port = port;
}
/* XXX If we ever gonna change pid file pattern, come up with
* some intelligence here to deal with old paths. */
if (!(priv->pidfile = virPidFileBuildPath(cfg->stateDir, obj->def->name)))
goto error;
if (graphics->data.vnc.websocket == -1) {
if (virPortAllocatorAcquire(driver->webSocketPorts, &port) < 0)
return -1;
graphics->data.vnc.websocket = port;
graphics->data.vnc.websocketGenerated = true;
}
/* Restore the masterKey */
if (qemuDomainMasterKeyReadFile(priv) < 0)
goto error;
return 0;
}
virNWFilterReadLockFilterUpdates();
static int
qemuProcessSPICEAllocatePorts(virQEMUDriverPtr driver,
virDomainGraphicsDefPtr graphics,
bool allocate)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
unsigned short port = 0;
unsigned short tlsPort;
size_t i;
int defaultMode = graphics->data.spice.defaultMode;
int ret = -1;
VIR_DEBUG("Reconnect monitor to %p '%s'", obj, obj->def->name);
bool needTLSPort = false;
bool needPort = false;
/* XXX check PID liveliness & EXE path */
if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, NULL) < 0)
goto error;
if (graphics->data.spice.autoport) {
/* check if tlsPort or port need allocation */
for (i = 0; i < VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_LAST; i++) {
switch (graphics->data.spice.channels[i]) {
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
needTLSPort = true;
break;
if (qemuHostdevUpdateActiveDomainDevices(driver, obj->def) < 0)
goto error;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
needPort = true;
break;
if (qemuConnectCgroup(driver, obj) < 0)
goto error;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
/* default mode will be used */
break;
}
}
switch (defaultMode) {
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
needTLSPort = true;
break;
if (qemuDomainPerfRestart(obj) < 0)
goto error;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
needPort = true;
break;
/* XXX: Need to change as long as lock is introduced for
* qemu_driver->sharedDevices.
*/
for (i = 0; i < obj->def->ndisks; i++) {
virDomainDeviceDef dev;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
if (cfg->spiceTLS)
needTLSPort = true;
needPort = true;
break;
}
}
if (virStorageTranslateDiskSourcePool(conn, obj->def->disks[i]) < 0)
goto error;
if (!allocate) {
if (needPort || graphics->data.spice.port == -1)
graphics->data.spice.port = 5901;
/* XXX we should be able to restore all data from XML in the future.
* This should be the only place that calls qemuDomainDetermineDiskChain
* with @report_broken == false to guarantee best-effort domain
* reconnect */
if (qemuDomainDetermineDiskChain(driver, obj, obj->def->disks[i],
true, false) < 0)
goto error;
if (needTLSPort || graphics->data.spice.tlsPort == -1)
graphics->data.spice.tlsPort = 5902;
dev.type = VIR_DOMAIN_DEVICE_DISK;
dev.data.disk = obj->def->disks[i];
if (qemuAddSharedDevice(driver, &dev, obj->def->name) < 0)
goto error;
ret = 0;
goto cleanup;
}
if (qemuProcessUpdateState(driver, obj) < 0)
goto error;
if (needPort || graphics->data.spice.port == -1) {
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
goto cleanup;
state = virDomainObjGetState(obj, &reason);
if (state == VIR_DOMAIN_SHUTOFF ||
(state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_STARTING_UP)) {
VIR_DEBUG("Domain '%s' wasn't fully started yet, killing it",
obj->def->name);
goto error;
graphics->data.spice.port = port;
if (!graphics->data.spice.autoport)
graphics->data.spice.portReserved = true;
}
/* If upgrading from old libvirtd we won't have found any
* caps in the domain status, so re-query them
*/
if (!priv->qemuCaps &&
!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
driver->qemuCapsCache,
obj->def->emulator,
obj->def->os.machine)))
goto error;
if (needTLSPort || graphics->data.spice.tlsPort == -1) {
if (!cfg->spiceTLS) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Auto allocation of spice TLS port requested "
"but spice TLS is disabled in qemu.conf"));
goto cleanup;
}
/* In case the domain shutdown while we were not running,
* we need to finish the shutdown process. And we need to do it after
* we have virQEMUCaps filled in.
*/
if (state == VIR_DOMAIN_SHUTDOWN ||
(state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN)) {
VIR_DEBUG("Finishing shutdown sequence for domain %s",
obj->def->name);
qemuProcessShutdownOrReboot(driver, obj);
goto cleanup;
}
if (virPortAllocatorAcquire(driver->remotePorts, &tlsPort) < 0)
goto cleanup;
if (qemuProcessBuildDestroyHugepagesPath(driver, obj, NULL, true) < 0)
goto error;
graphics->data.spice.tlsPort = tlsPort;
if ((qemuDomainAssignAddresses(obj->def, priv->qemuCaps,
driver, obj, false)) < 0) {
goto error;
if (!graphics->data.spice.autoport)
graphics->data.spice.tlsPortReserved = true;
}
/* if domain requests security driver we haven't loaded, report error, but
* do not kill the domain
*/
ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
obj->def));
ret = 0;
if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
goto error;
cleanup:
virObjectUnref(cfg);
return ret;
}
qemuDomainVcpuPersistOrder(obj->def);
if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
goto error;
static int
qemuValidateCpuCount(virDomainDefPtr def,
virQEMUCapsPtr qemuCaps)
{
unsigned int maxCpus = virQEMUCapsGetMachineMaxCpus(qemuCaps, def->os.machine);
qemuProcessNotifyNets(obj->def);
if (virDomainDefGetVcpus(def) == 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Domain requires at least 1 vCPU"));
return -1;
}
if (qemuProcessFiltersInstantiate(obj->def))
goto error;
if (maxCpus > 0 && virDomainDefGetVcpusMax(def) > maxCpus) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Maximum CPUs greater than specified machine type limit"));
return -1;
}
if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
return 0;
}
if (qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
/* If querying of guest's RTC failed, report error, but do not kill the domain. */
qemuRefreshRTC(driver, obj);
if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
goto error;
if (qemuProcessUpdateDevices(driver, obj) < 0)
goto error;
qemuProcessReconnectCheckMemAliasOrderMismatch(obj);
static int
qemuProcessVerifyHypervFeatures(virDomainDefPtr def,
virCPUDataPtr cpu)
{
char *cpuFeature;
size_t i;
int rc;
if (qemuConnectAgent(driver, obj) < 0)
goto error;
for (i = 0; i < VIR_DOMAIN_HYPERV_LAST; i++) {
/* always supported string property */
if (i == VIR_DOMAIN_HYPERV_VENDOR_ID)
continue;
/* update domain state XML with possibly updated state in virDomainObj */
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, obj, driver->caps) < 0)
goto error;
if (def->hyperv_features[i] != VIR_TRISTATE_SWITCH_ON)
continue;
/* Run an hook to allow admins to do some magic */
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
char *xml = qemuDomainDefFormatXML(driver, obj->def, 0);
int hookret;
if (virAsprintf(&cpuFeature, "__kvm_hv_%s",
virDomainHypervTypeToString(i)) < 0)
return -1;
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, obj->def->name,
VIR_HOOK_QEMU_OP_RECONNECT, VIR_HOOK_SUBOP_BEGIN,
NULL, xml, NULL);
VIR_FREE(xml);
rc = virCPUDataCheckFeature(cpu, cpuFeature);
VIR_FREE(cpuFeature);
/*
* If the script raised an error abort the launch
*/
if (hookret < 0)
goto error;
}
if (rc < 0)
return -1;
else if (rc == 1)
continue;
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
driver->inhibitCallback(true, driver->inhibitOpaque);
switch ((virDomainHyperv) i) {
case VIR_DOMAIN_HYPERV_RELAXED:
case VIR_DOMAIN_HYPERV_VAPIC:
case VIR_DOMAIN_HYPERV_SPINLOCKS:
VIR_WARN("host doesn't support hyperv '%s' feature",
virDomainHypervTypeToString(i));
break;
cleanup:
if (jobStarted)
qemuDomainObjEndJob(driver, obj);
if (!virDomainObjIsActive(obj))
qemuDomainRemoveInactive(driver, obj);
virDomainObjEndAPI(&obj);
virObjectUnref(conn);
virObjectUnref(cfg);
virObjectUnref(caps);
virNWFilterUnlockFilterUpdates();
return;
case VIR_DOMAIN_HYPERV_VPINDEX:
case VIR_DOMAIN_HYPERV_RUNTIME:
case VIR_DOMAIN_HYPERV_SYNIC:
case VIR_DOMAIN_HYPERV_STIMER:
case VIR_DOMAIN_HYPERV_RESET:
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("host doesn't support hyperv '%s' feature"),
virDomainHypervTypeToString(i));
return -1;
error:
if (virDomainObjIsActive(obj)) {
/* We can't get the monitor back, so must kill the VM
* to remove danger of it ending up running twice if
* user tries to start it again later
*/
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NO_SHUTDOWN)) {
/* If we couldn't get the monitor and qemu supports
* no-shutdown, we can safely say that the domain
* crashed ... */
state = VIR_DOMAIN_SHUTOFF_CRASHED;
} else {
/* ... but if it doesn't we can't say what the state
* really is and FAILED means "failed to start" */
state = VIR_DOMAIN_SHUTOFF_UNKNOWN;
/* coverity[dead_error_begin] */
case VIR_DOMAIN_HYPERV_VENDOR_ID:
case VIR_DOMAIN_HYPERV_LAST:
break;
}
/* If BeginJob failed, we jumped here without a job, let's hope another
* thread didn't have a chance to start playing with the domain yet
* (it's all we can do anyway).
*/
qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
}
goto cleanup;
return 0;
}
static int
qemuProcessReconnectHelper(virDomainObjPtr obj,
void *opaque)
qemuProcessVerifyKVMFeatures(virDomainDefPtr def,
virCPUDataPtr cpu)
{
virThread thread;
struct qemuProcessReconnectData *src = opaque;
struct qemuProcessReconnectData *data;
int rc = 0;
/* If the VM was inactive, we don't need to reconnect */
if (!obj->pid)
if (def->features[VIR_DOMAIN_FEATURE_PVSPINLOCK] != VIR_TRISTATE_SWITCH_ON)
return 0;
if (VIR_ALLOC(data) < 0)
rc = virCPUDataCheckFeature(cpu, VIR_CPU_x86_KVM_PV_UNHALT);
if (rc <= 0) {
if (rc == 0)
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("host doesn't support paravirtual spinlocks"));
return -1;
}
memcpy(data, src, sizeof(*data));
data->obj = obj;
return 0;
}
/* this lock and reference will be eventually transferred to the thread
* that handles the reconnect */
virObjectLock(obj);
virObjectRef(obj);
/* Since we close the connection later on, we have to make sure that the
* threads we start see a valid connection throughout their lifetime. We
* simply increase the reference counter here.
*/
virObjectRef(data->conn);
static int
qemuProcessVerifyCPUFeatures(virDomainDefPtr def,
virCPUDataPtr cpu)
{
int rc;
if (virThreadCreate(&thread, false, qemuProcessReconnect, data) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Could not create thread. QEMU initialization "
"might be incomplete"));
/* We can't spawn a thread and thus connect to monitor. Kill qemu.
* It's safe to call qemuProcessStop without a job here since there
* is no thread that could be doing anything else with the same domain
* object.
*/
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
QEMU_ASYNC_JOB_NONE, 0);
qemuDomainRemoveInactive(src->driver, obj);
rc = virCPUCheckFeature(def->os.arch, def->cpu, "invtsc");
virDomainObjEndAPI(&obj);
virObjectUnref(data->conn);
VIR_FREE(data);
if (rc < 0) {
return -1;
} else if (rc == 1) {
rc = virCPUDataCheckFeature(cpu, "invtsc");
if (rc <= 0) {
if (rc == 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("host doesn't support invariant TSC"));
}
return -1;
}
}
return 0;
}
/**
* qemuProcessReconnectAll
*
* Try to re-open the resources for live VMs that we care
* about.
*/
void
qemuProcessReconnectAll(virConnectPtr conn, virQEMUDriverPtr driver)
{
struct qemuProcessReconnectData data = {.conn = conn, .driver = driver};
virDomainObjListForEach(driver->domains, qemuProcessReconnectHelper, &data);
}
static int
qemuProcessVNCAllocatePorts(virQEMUDriverPtr driver,
virDomainGraphicsDefPtr graphics,
bool allocate)
qemuProcessFetchGuestCPU(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob,
virCPUDataPtr *enabled,
virCPUDataPtr *disabled)
{
unsigned short port;
qemuDomainObjPrivatePtr priv = vm->privateData;
virCPUDataPtr dataEnabled = NULL;
virCPUDataPtr dataDisabled = NULL;
int rc;
if (!allocate) {
if (graphics->data.vnc.autoport)
graphics->data.vnc.port = 5900;
*enabled = NULL;
*disabled = NULL;
if (!ARCH_IS_X86(vm->def->os.arch))
return 0;
}
if (graphics->data.vnc.autoport) {
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
return -1;
graphics->data.vnc.port = port;
}
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
goto error;
if (graphics->data.vnc.websocket == -1) {
if (virPortAllocatorAcquire(driver->webSocketPorts, &port) < 0)
return -1;
graphics->data.vnc.websocket = port;
graphics->data.vnc.websocketGenerated = true;
}
rc = qemuMonitorGetGuestCPU(priv->mon, vm->def->os.arch,
&dataEnabled, &dataDisabled);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto error;
if (rc == -1)
goto error;
*enabled = dataEnabled;
*disabled = dataDisabled;
return 0;
error:
virCPUDataFree(dataEnabled);
virCPUDataFree(dataDisabled);
return -1;
}
static int
qemuProcessSPICEAllocatePorts(virQEMUDriverPtr driver,
virDomainGraphicsDefPtr graphics,
bool allocate)
qemuProcessVerifyCPU(virDomainObjPtr vm,
virCPUDataPtr cpu)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
unsigned short port = 0;
unsigned short tlsPort;
size_t i;
int defaultMode = graphics->data.spice.defaultMode;
int ret = -1;
virDomainDefPtr def = vm->def;
bool needTLSPort = false;
bool needPort = false;
if (graphics->data.spice.autoport) {
/* check if tlsPort or port need allocation */
for (i = 0; i < VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_LAST; i++) {
switch (graphics->data.spice.channels[i]) {
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
needTLSPort = true;
break;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
needPort = true;
break;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
/* default mode will be used */
break;
}
}
switch (defaultMode) {
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
needTLSPort = true;
break;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
needPort = true;
break;
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
if (cfg->spiceTLS)
needTLSPort = true;
needPort = true;
break;
}
}
if (!allocate) {
if (needPort || graphics->data.spice.port == -1)
graphics->data.spice.port = 5901;
if (needTLSPort || graphics->data.spice.tlsPort == -1)
graphics->data.spice.tlsPort = 5902;
ret = 0;
goto cleanup;
}
if (needPort || graphics->data.spice.port == -1) {
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
goto cleanup;
graphics->data.spice.port = port;
if (!graphics->data.spice.autoport)
graphics->data.spice.portReserved = true;
}
if (needTLSPort || graphics->data.spice.tlsPort == -1) {
if (!cfg->spiceTLS) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Auto allocation of spice TLS port requested "
"but spice TLS is disabled in qemu.conf"));
goto cleanup;
}
if (virPortAllocatorAcquire(driver->remotePorts, &tlsPort) < 0)
goto cleanup;
graphics->data.spice.tlsPort = tlsPort;
if (!graphics->data.spice.autoport)
graphics->data.spice.tlsPortReserved = true;
}
ret = 0;
cleanup:
virObjectUnref(cfg);
return ret;
}
static int
qemuValidateCpuCount(virDomainDefPtr def,
virQEMUCapsPtr qemuCaps)
{
unsigned int maxCpus = virQEMUCapsGetMachineMaxCpus(qemuCaps, def->os.machine);
if (virDomainDefGetVcpus(def) == 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Domain requires at least 1 vCPU"));
return -1;
}
if (maxCpus > 0 && virDomainDefGetVcpusMax(def) > maxCpus) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Maximum CPUs greater than specified machine type limit"));
return -1;
}
return 0;
}
static int
qemuProcessVerifyHypervFeatures(virDomainDefPtr def,
virCPUDataPtr cpu)
{
char *cpuFeature;
size_t i;
int rc;
for (i = 0; i < VIR_DOMAIN_HYPERV_LAST; i++) {
/* always supported string property */
if (i == VIR_DOMAIN_HYPERV_VENDOR_ID)
continue;
if (def->hyperv_features[i] != VIR_TRISTATE_SWITCH_ON)
continue;
if (virAsprintf(&cpuFeature, "__kvm_hv_%s",
virDomainHypervTypeToString(i)) < 0)
return -1;
rc = virCPUDataCheckFeature(cpu, cpuFeature);
VIR_FREE(cpuFeature);
if (rc < 0)
return -1;
else if (rc == 1)
continue;
switch ((virDomainHyperv) i) {
case VIR_DOMAIN_HYPERV_RELAXED:
case VIR_DOMAIN_HYPERV_VAPIC:
case VIR_DOMAIN_HYPERV_SPINLOCKS:
VIR_WARN("host doesn't support hyperv '%s' feature",
virDomainHypervTypeToString(i));
break;
case VIR_DOMAIN_HYPERV_VPINDEX:
case VIR_DOMAIN_HYPERV_RUNTIME:
case VIR_DOMAIN_HYPERV_SYNIC:
case VIR_DOMAIN_HYPERV_STIMER:
case VIR_DOMAIN_HYPERV_RESET:
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("host doesn't support hyperv '%s' feature"),
virDomainHypervTypeToString(i));
return -1;
/* coverity[dead_error_begin] */
case VIR_DOMAIN_HYPERV_VENDOR_ID:
case VIR_DOMAIN_HYPERV_LAST:
break;
}
}
return 0;
}
static int
qemuProcessVerifyKVMFeatures(virDomainDefPtr def,
virCPUDataPtr cpu)
{
int rc = 0;
if (def->features[VIR_DOMAIN_FEATURE_PVSPINLOCK] != VIR_TRISTATE_SWITCH_ON)
return 0;
rc = virCPUDataCheckFeature(cpu, VIR_CPU_x86_KVM_PV_UNHALT);
if (rc <= 0) {
if (rc == 0)
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("host doesn't support paravirtual spinlocks"));
return -1;
}
return 0;
}
static int
qemuProcessVerifyCPUFeatures(virDomainDefPtr def,
virCPUDataPtr cpu)
{
int rc;
rc = virCPUCheckFeature(def->os.arch, def->cpu, "invtsc");
if (rc < 0) {
return -1;
} else if (rc == 1) {
rc = virCPUDataCheckFeature(cpu, "invtsc");
if (rc <= 0) {
if (rc == 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("host doesn't support invariant TSC"));
}
return -1;
}
}
return 0;
}
static int
qemuProcessFetchGuestCPU(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob,
virCPUDataPtr *enabled,
virCPUDataPtr *disabled)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virCPUDataPtr dataEnabled = NULL;
virCPUDataPtr dataDisabled = NULL;
int rc;
*enabled = NULL;
*disabled = NULL;
if (!ARCH_IS_X86(vm->def->os.arch))
return 0;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
goto error;
rc = qemuMonitorGetGuestCPU(priv->mon, vm->def->os.arch,
&dataEnabled, &dataDisabled);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto error;
if (rc == -1)
goto error;
*enabled = dataEnabled;
*disabled = dataDisabled;
return 0;
error:
virCPUDataFree(dataEnabled);
virCPUDataFree(dataDisabled);
return -1;
}
static int
qemuProcessVerifyCPU(virDomainObjPtr vm,
virCPUDataPtr cpu)
{
virDomainDefPtr def = vm->def;
if (!cpu)
return 0;
if (!cpu)
return 0;
if (qemuProcessVerifyKVMFeatures(def, cpu) < 0 ||
qemuProcessVerifyHypervFeatures(def, cpu) < 0)
@@ -6633,208 +6311,586 @@ void qemuProcessStop(virQEMUDriverPtr driver,
}
int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
pid_t pid,
const char *pidfile,
virDomainChrSourceDefPtr monConfig,
bool monJSON)
int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
virQEMUDriverPtr driver,
virDomainObjPtr vm,
pid_t pid,
const char *pidfile,
virDomainChrSourceDefPtr monConfig,
bool monJSON)
{
size_t i;
qemuDomainLogContextPtr logCtxt = NULL;
char *timestamp;
qemuDomainObjPrivatePtr priv = vm->privateData;
bool running = true;
virDomainPausedReason reason;
virSecurityLabelPtr seclabel = NULL;
virSecurityLabelDefPtr seclabeldef = NULL;
bool seclabelgen = false;
virSecurityManagerPtr* sec_managers = NULL;
const char *model;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virCapsPtr caps = NULL;
bool active = false;
VIR_DEBUG("Beginning VM attach process");
if (virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("VM is already active"));
virObjectUnref(cfg);
return -1;
}
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto error;
/* Do this upfront, so any part of the startup process can add
* runtime state to vm->def that won't be persisted. This let's us
* report implicit runtime defaults in the XML, like vnc listen/socket
*/
VIR_DEBUG("Setting current domain def as transient");
if (virDomainObjSetDefTransient(caps, driver->xmlopt, vm) < 0)
goto error;
vm->def->id = qemuDriverAllocateID(driver);
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
driver->inhibitCallback(true, driver->inhibitOpaque);
active = true;
if (virFileMakePath(cfg->logDir) < 0) {
virReportSystemError(errno,
_("cannot create log directory %s"),
cfg->logDir);
goto error;
}
VIR_FREE(priv->pidfile);
if (VIR_STRDUP(priv->pidfile, pidfile) < 0)
goto error;
vm->pid = pid;
VIR_DEBUG("Detect security driver config");
sec_managers = qemuSecurityGetNested(driver->securityManager);
if (sec_managers == NULL)
goto error;
for (i = 0; sec_managers[i]; i++) {
seclabelgen = false;
model = qemuSecurityGetModel(sec_managers[i]);
seclabeldef = virDomainDefGetSecurityLabelDef(vm->def, model);
if (seclabeldef == NULL) {
if (!(seclabeldef = virSecurityLabelDefNew(model)))
goto error;
seclabelgen = true;
}
seclabeldef->type = VIR_DOMAIN_SECLABEL_STATIC;
if (VIR_ALLOC(seclabel) < 0)
goto error;
if (qemuSecurityGetProcessLabel(sec_managers[i], vm->def,
vm->pid, seclabel) < 0)
goto error;
if (VIR_STRDUP(seclabeldef->model, model) < 0)
goto error;
if (VIR_STRDUP(seclabeldef->label, seclabel->label) < 0)
goto error;
VIR_FREE(seclabel);
if (seclabelgen) {
if (VIR_APPEND_ELEMENT(vm->def->seclabels, vm->def->nseclabels, seclabeldef) < 0)
goto error;
seclabelgen = false;
}
}
if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
goto error;
if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0)
goto error;
if (qemuDomainPerfRestart(vm) < 0)
goto error;
VIR_DEBUG("Creating domain log file");
if (!(logCtxt = qemuDomainLogContextNew(driver, vm,
QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH)))
goto error;
VIR_DEBUG("Determining emulator version");
virObjectUnref(priv->qemuCaps);
if (!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
driver->qemuCapsCache,
vm->def->emulator,
vm->def->os.machine)))
goto error;
VIR_DEBUG("Preparing monitor state");
priv->monConfig = monConfig;
monConfig = NULL;
priv->monJSON = monJSON;
priv->gotShutdown = false;
/*
* Normally PCI addresses are assigned in the virDomainCreate
* or virDomainDefine methods. We might still need to assign
* some here to cope with the question of upgrades. Regardless
* we also need to populate the PCI address set cache for later
* use in hotplug
*/
VIR_DEBUG("Assigning domain PCI addresses");
if ((qemuDomainAssignAddresses(vm->def, priv->qemuCaps,
driver, vm, false)) < 0) {
goto error;
}
if ((timestamp = virTimeStringNow()) == NULL)
goto error;
qemuDomainLogContextWrite(logCtxt, "%s: attaching\n", timestamp);
VIR_FREE(timestamp);
qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_EXTERNAL_LAUNCH, logCtxt);
VIR_DEBUG("Waiting for monitor to show up");
if (qemuProcessWaitForMonitor(driver, vm, QEMU_ASYNC_JOB_NONE, NULL) < 0)
goto error;
if (qemuConnectAgent(driver, vm) < 0)
goto error;
VIR_DEBUG("Detecting VCPU PIDs");
if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
goto error;
if (qemuDomainValidateVcpuInfo(vm) < 0)
goto error;
VIR_DEBUG("Detecting IOThread PIDs");
if (qemuProcessDetectIOThreadPIDs(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
VIR_DEBUG("Getting initial memory amount");
qemuDomainObjEnterMonitor(driver, vm);
if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0)
goto exit_monitor;
if (qemuMonitorGetStatus(priv->mon, &running, &reason) < 0)
goto exit_monitor;
if (qemuMonitorGetVirtType(priv->mon, &vm->def->virtType) < 0)
goto exit_monitor;
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto error;
if (running) {
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
VIR_DOMAIN_RUNNING_UNPAUSED);
if (vm->def->memballoon &&
vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO &&
vm->def->memballoon->period) {
qemuDomainObjEnterMonitor(driver, vm);
qemuMonitorSetMemoryStatsPeriod(priv->mon, vm->def->memballoon,
vm->def->memballoon->period);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto error;
}
} else {
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
}
VIR_DEBUG("Writing domain status to disk");
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
goto error;
/* Run an hook to allow admins to do some magic */
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0);
int hookret;
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
VIR_HOOK_QEMU_OP_ATTACH, VIR_HOOK_SUBOP_BEGIN,
NULL, xml, NULL);
VIR_FREE(xml);
/*
* If the script raised an error abort the launch
*/
if (hookret < 0)
goto error;
}
virObjectUnref(logCtxt);
VIR_FREE(seclabel);
VIR_FREE(sec_managers);
virObjectUnref(cfg);
virObjectUnref(caps);
return 0;
exit_monitor:
ignore_value(qemuDomainObjExitMonitor(driver, vm));
error:
/* We jump here if we failed to attach to the VM for any reason.
* Leave the domain running, but pretend we never attempted to
* attach to it. */
if (active && virAtomicIntDecAndTest(&driver->nactive) &&
driver->inhibitCallback)
driver->inhibitCallback(false, driver->inhibitOpaque);
qemuMonitorClose(priv->mon);
priv->mon = NULL;
virObjectUnref(logCtxt);
VIR_FREE(seclabel);
VIR_FREE(sec_managers);
if (seclabelgen)
virSecurityLabelDefFree(seclabeldef);
virDomainChrSourceDefFree(monConfig);
virObjectUnref(cfg);
virObjectUnref(caps);
return -1;
}
static virDomainObjPtr
qemuProcessAutoDestroy(virDomainObjPtr dom,
virConnectPtr conn,
void *opaque)
{
virQEMUDriverPtr driver = opaque;
qemuDomainObjPrivatePtr priv = dom->privateData;
virObjectEventPtr event = NULL;
unsigned int stopFlags = 0;
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
virObjectRef(dom);
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
qemuDomainObjDiscardAsyncJob(driver, dom);
}
VIR_DEBUG("Killing domain");
if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
goto cleanup;
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
QEMU_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
qemuDomainObjEndJob(driver, dom);
qemuDomainRemoveInactive(driver, dom);
qemuDomainEventQueue(driver, event);
cleanup:
virDomainObjEndAPI(&dom);
return dom;
}
int qemuProcessAutoDestroyAdd(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virConnectPtr conn)
{
VIR_DEBUG("vm=%s, conn=%p", vm->def->name, conn);
return virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
qemuProcessAutoDestroy);
}
int qemuProcessAutoDestroyRemove(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
int ret;
VIR_DEBUG("vm=%s", vm->def->name);
ret = virCloseCallbacksUnset(driver->closeCallbacks, vm,
qemuProcessAutoDestroy);
return ret;
}
bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
virCloseCallback cb;
VIR_DEBUG("vm=%s", vm->def->name);
cb = virCloseCallbacksGet(driver->closeCallbacks, vm, NULL);
return cb == qemuProcessAutoDestroy;
}
int
qemuProcessRefreshDisks(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr table = NULL;
int ret = -1;
size_t i;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
table = qemuMonitorGetBlockInfo(priv->mon);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto cleanup;
}
if (!table)
goto cleanup;
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDefPtr disk = vm->def->disks[i];
qemuDomainDiskPrivatePtr diskpriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
struct qemuDomainDiskInfo *info;
if (!(info = virHashLookup(table, disk->info.alias)))
continue;
if (info->removable) {
if (info->empty)
virDomainDiskEmptySource(disk);
if (info->tray) {
if (info->tray_open)
disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
else
disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;
}
}
/* fill in additional data */
diskpriv->removable = info->removable;
diskpriv->tray = info->tray;
}
ret = 0;
cleanup:
virHashFree(table);
return ret;
}
struct qemuProcessReconnectData {
virConnectPtr conn;
virQEMUDriverPtr driver;
virDomainObjPtr obj;
};
/*
* Open an existing VM's monitor, re-detect VCPU threads
* and re-reserve the security labels in use
*
* We own the virConnectPtr we are passed here - whoever started
* this thread function has increased the reference counter to it
* so that we now have to close it.
*
* This function also inherits a locked and ref'd domain object.
*
* This function needs to:
* 1. Enter job
* 1. just before monitor reconnect do lightweight MonitorEnter
* (increase VM refcount and unlock VM)
* 2. reconnect to monitor
* 3. do lightweight MonitorExit (lock VM)
* 4. continue reconnect process
* 5. EndJob
*
* We can't do normal MonitorEnter & MonitorExit because these two lock the
* monitor lock, which does not exists in this early phase.
*/
static void
qemuProcessReconnect(void *opaque)
{
struct qemuProcessReconnectData *data = opaque;
virQEMUDriverPtr driver = data->driver;
virDomainObjPtr obj = data->obj;
qemuDomainObjPrivatePtr priv;
virConnectPtr conn = data->conn;
struct qemuDomainJobObj oldjob;
int state;
int reason;
virQEMUDriverConfigPtr cfg;
size_t i;
qemuDomainLogContextPtr logCtxt = NULL;
char *timestamp;
qemuDomainObjPrivatePtr priv = vm->privateData;
bool running = true;
virDomainPausedReason reason;
virSecurityLabelPtr seclabel = NULL;
virSecurityLabelDefPtr seclabeldef = NULL;
bool seclabelgen = false;
virSecurityManagerPtr* sec_managers = NULL;
const char *model;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
unsigned int stopFlags = 0;
bool jobStarted = false;
virCapsPtr caps = NULL;
bool active = false;
VIR_DEBUG("Beginning VM attach process");
VIR_FREE(data);
if (virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("VM is already active"));
virObjectUnref(cfg);
return -1;
}
qemuDomainObjRestoreJob(obj, &oldjob);
if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
cfg = virQEMUDriverGetConfig(driver);
priv = obj->privateData;
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto error;
/* Do this upfront, so any part of the startup process can add
* runtime state to vm->def that won't be persisted. This let's us
* report implicit runtime defaults in the XML, like vnc listen/socket
*/
VIR_DEBUG("Setting current domain def as transient");
if (virDomainObjSetDefTransient(caps, driver->xmlopt, vm) < 0)
if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
goto error;
jobStarted = true;
vm->def->id = qemuDriverAllocateID(driver);
/* XXX If we ever gonna change pid file pattern, come up with
* some intelligence here to deal with old paths. */
if (!(priv->pidfile = virPidFileBuildPath(cfg->stateDir, obj->def->name)))
goto error;
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
driver->inhibitCallback(true, driver->inhibitOpaque);
active = true;
/* Restore the masterKey */
if (qemuDomainMasterKeyReadFile(priv) < 0)
goto error;
if (virFileMakePath(cfg->logDir) < 0) {
virReportSystemError(errno,
_("cannot create log directory %s"),
cfg->logDir);
virNWFilterReadLockFilterUpdates();
VIR_DEBUG("Reconnect monitor to %p '%s'", obj, obj->def->name);
/* XXX check PID liveliness & EXE path */
if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, NULL) < 0)
goto error;
}
VIR_FREE(priv->pidfile);
if (VIR_STRDUP(priv->pidfile, pidfile) < 0)
if (qemuHostdevUpdateActiveDomainDevices(driver, obj->def) < 0)
goto error;
vm->pid = pid;
if (qemuConnectCgroup(driver, obj) < 0)
goto error;
VIR_DEBUG("Detect security driver config");
sec_managers = qemuSecurityGetNested(driver->securityManager);
if (sec_managers == NULL)
if (qemuDomainPerfRestart(obj) < 0)
goto error;
for (i = 0; sec_managers[i]; i++) {
seclabelgen = false;
model = qemuSecurityGetModel(sec_managers[i]);
seclabeldef = virDomainDefGetSecurityLabelDef(vm->def, model);
if (seclabeldef == NULL) {
if (!(seclabeldef = virSecurityLabelDefNew(model)))
goto error;
seclabelgen = true;
}
seclabeldef->type = VIR_DOMAIN_SECLABEL_STATIC;
if (VIR_ALLOC(seclabel) < 0)
goto error;
if (qemuSecurityGetProcessLabel(sec_managers[i], vm->def,
vm->pid, seclabel) < 0)
goto error;
/* XXX: Need to change as long as lock is introduced for
* qemu_driver->sharedDevices.
*/
for (i = 0; i < obj->def->ndisks; i++) {
virDomainDeviceDef dev;
if (VIR_STRDUP(seclabeldef->model, model) < 0)
if (virStorageTranslateDiskSourcePool(conn, obj->def->disks[i]) < 0)
goto error;
if (VIR_STRDUP(seclabeldef->label, seclabel->label) < 0)
/* XXX we should be able to restore all data from XML in the future.
* This should be the only place that calls qemuDomainDetermineDiskChain
* with @report_broken == false to guarantee best-effort domain
* reconnect */
if (qemuDomainDetermineDiskChain(driver, obj, obj->def->disks[i],
true, false) < 0)
goto error;
VIR_FREE(seclabel);
if (seclabelgen) {
if (VIR_APPEND_ELEMENT(vm->def->seclabels, vm->def->nseclabels, seclabeldef) < 0)
goto error;
seclabelgen = false;
}
dev.type = VIR_DOMAIN_DEVICE_DISK;
dev.data.disk = obj->def->disks[i];
if (qemuAddSharedDevice(driver, &dev, obj->def->name) < 0)
goto error;
}
if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
goto error;
if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0)
goto error;
if (qemuDomainPerfRestart(vm) < 0)
if (qemuProcessUpdateState(driver, obj) < 0)
goto error;
VIR_DEBUG("Creating domain log file");
if (!(logCtxt = qemuDomainLogContextNew(driver, vm,
QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH)))
state = virDomainObjGetState(obj, &reason);
if (state == VIR_DOMAIN_SHUTOFF ||
(state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_STARTING_UP)) {
VIR_DEBUG("Domain '%s' wasn't fully started yet, killing it",
obj->def->name);
goto error;
}
VIR_DEBUG("Determining emulator version");
virObjectUnref(priv->qemuCaps);
if (!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
/* If upgrading from old libvirtd we won't have found any
* caps in the domain status, so re-query them
*/
if (!priv->qemuCaps &&
!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
driver->qemuCapsCache,
vm->def->emulator,
vm->def->os.machine)))
obj->def->emulator,
obj->def->os.machine)))
goto error;
VIR_DEBUG("Preparing monitor state");
priv->monConfig = monConfig;
monConfig = NULL;
priv->monJSON = monJSON;
/* In case the domain shutdown while we were not running,
* we need to finish the shutdown process. And we need to do it after
* we have virQEMUCaps filled in.
*/
if (state == VIR_DOMAIN_SHUTDOWN ||
(state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN)) {
VIR_DEBUG("Finishing shutdown sequence for domain %s",
obj->def->name);
qemuProcessShutdownOrReboot(driver, obj);
goto cleanup;
}
priv->gotShutdown = false;
if (qemuProcessBuildDestroyHugepagesPath(driver, obj, NULL, true) < 0)
goto error;
/*
* Normally PCI addresses are assigned in the virDomainCreate
* or virDomainDefine methods. We might still need to assign
* some here to cope with the question of upgrades. Regardless
* we also need to populate the PCI address set cache for later
* use in hotplug
*/
VIR_DEBUG("Assigning domain PCI addresses");
if ((qemuDomainAssignAddresses(vm->def, priv->qemuCaps,
driver, vm, false)) < 0) {
if ((qemuDomainAssignAddresses(obj->def, priv->qemuCaps,
driver, obj, false)) < 0) {
goto error;
}
if ((timestamp = virTimeStringNow()) == NULL)
/* if domain requests security driver we haven't loaded, report error, but
* do not kill the domain
*/
ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
obj->def));
if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
goto error;
qemuDomainLogContextWrite(logCtxt, "%s: attaching\n", timestamp);
VIR_FREE(timestamp);
qemuDomainVcpuPersistOrder(obj->def);
qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_EXTERNAL_LAUNCH, logCtxt);
if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
goto error;
VIR_DEBUG("Waiting for monitor to show up");
if (qemuProcessWaitForMonitor(driver, vm, QEMU_ASYNC_JOB_NONE, NULL) < 0)
qemuProcessNotifyNets(obj->def);
if (qemuProcessFiltersInstantiate(obj->def))
goto error;
if (qemuConnectAgent(driver, vm) < 0)
if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
VIR_DEBUG("Detecting VCPU PIDs");
if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
if (qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuDomainValidateVcpuInfo(vm) < 0)
if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
VIR_DEBUG("Detecting IOThread PIDs");
if (qemuProcessDetectIOThreadPIDs(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
/* If querying of guest's RTC failed, report error, but do not kill the domain. */
qemuRefreshRTC(driver, obj);
if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
VIR_DEBUG("Getting initial memory amount");
qemuDomainObjEnterMonitor(driver, vm);
if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0)
goto exit_monitor;
if (qemuMonitorGetStatus(priv->mon, &running, &reason) < 0)
goto exit_monitor;
if (qemuMonitorGetVirtType(priv->mon, &vm->def->virtType) < 0)
goto exit_monitor;
if (qemuDomainObjExitMonitor(driver, vm) < 0)
if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
goto error;
if (running) {
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
VIR_DOMAIN_RUNNING_UNPAUSED);
if (vm->def->memballoon &&
vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO &&
vm->def->memballoon->period) {
qemuDomainObjEnterMonitor(driver, vm);
qemuMonitorSetMemoryStatsPeriod(priv->mon, vm->def->memballoon,
vm->def->memballoon->period);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto error;
}
} else {
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
}
if (qemuProcessUpdateDevices(driver, obj) < 0)
goto error;
VIR_DEBUG("Writing domain status to disk");
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
qemuProcessReconnectCheckMemAliasOrderMismatch(obj);
if (qemuConnectAgent(driver, obj) < 0)
goto error;
/* update domain state XML with possibly updated state in virDomainObj */
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, obj, driver->caps) < 0)
goto error;
/* Run an hook to allow admins to do some magic */
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0);
char *xml = qemuDomainDefFormatXML(driver, obj->def, 0);
int hookret;
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
VIR_HOOK_QEMU_OP_ATTACH, VIR_HOOK_SUBOP_BEGIN,
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, obj->def->name,
VIR_HOOK_QEMU_OP_RECONNECT, VIR_HOOK_SUBOP_BEGIN,
NULL, xml, NULL);
VIR_FREE(xml);
@@ -6845,161 +6901,106 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
goto error;
}
virObjectUnref(logCtxt);
VIR_FREE(seclabel);
VIR_FREE(sec_managers);
virObjectUnref(cfg);
virObjectUnref(caps);
return 0;
exit_monitor:
ignore_value(qemuDomainObjExitMonitor(driver, vm));
error:
/* We jump here if we failed to attach to the VM for any reason.
* Leave the domain running, but pretend we never attempted to
* attach to it. */
if (active && virAtomicIntDecAndTest(&driver->nactive) &&
driver->inhibitCallback)
driver->inhibitCallback(false, driver->inhibitOpaque);
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
driver->inhibitCallback(true, driver->inhibitOpaque);
qemuMonitorClose(priv->mon);
priv->mon = NULL;
virObjectUnref(logCtxt);
VIR_FREE(seclabel);
VIR_FREE(sec_managers);
if (seclabelgen)
virSecurityLabelDefFree(seclabeldef);
virDomainChrSourceDefFree(monConfig);
cleanup:
if (jobStarted)
qemuDomainObjEndJob(driver, obj);
if (!virDomainObjIsActive(obj))
qemuDomainRemoveInactive(driver, obj);
virDomainObjEndAPI(&obj);
virObjectUnref(conn);
virObjectUnref(cfg);
virObjectUnref(caps);
return -1;
}
static virDomainObjPtr
qemuProcessAutoDestroy(virDomainObjPtr dom,
virConnectPtr conn,
void *opaque)
{
virQEMUDriverPtr driver = opaque;
qemuDomainObjPrivatePtr priv = dom->privateData;
virObjectEventPtr event = NULL;
unsigned int stopFlags = 0;
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
virObjectRef(dom);
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
virNWFilterUnlockFilterUpdates();
return;
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
qemuDomainObjDiscardAsyncJob(driver, dom);
error:
if (virDomainObjIsActive(obj)) {
/* We can't get the monitor back, so must kill the VM
* to remove danger of it ending up running twice if
* user tries to start it again later
*/
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NO_SHUTDOWN)) {
/* If we couldn't get the monitor and qemu supports
* no-shutdown, we can safely say that the domain
* crashed ... */
state = VIR_DOMAIN_SHUTOFF_CRASHED;
} else {
/* ... but if it doesn't we can't say what the state
* really is and FAILED means "failed to start" */
state = VIR_DOMAIN_SHUTOFF_UNKNOWN;
}
/* If BeginJob failed, we jumped here without a job, let's hope another
* thread didn't have a chance to start playing with the domain yet
* (it's all we can do anyway).
*/
qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
}
VIR_DEBUG("Killing domain");
if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
goto cleanup;
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
QEMU_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
qemuDomainObjEndJob(driver, dom);
qemuDomainRemoveInactive(driver, dom);
qemuDomainEventQueue(driver, event);
cleanup:
virDomainObjEndAPI(&dom);
return dom;
}
int qemuProcessAutoDestroyAdd(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virConnectPtr conn)
{
VIR_DEBUG("vm=%s, conn=%p", vm->def->name, conn);
return virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
qemuProcessAutoDestroy);
}
int qemuProcessAutoDestroyRemove(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
int ret;
VIR_DEBUG("vm=%s", vm->def->name);
ret = virCloseCallbacksUnset(driver->closeCallbacks, vm,
qemuProcessAutoDestroy);
return ret;
}
bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
virCloseCallback cb;
VIR_DEBUG("vm=%s", vm->def->name);
cb = virCloseCallbacksGet(driver->closeCallbacks, vm, NULL);
return cb == qemuProcessAutoDestroy;
goto cleanup;
}
int
qemuProcessRefreshDisks(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob)
static int
qemuProcessReconnectHelper(virDomainObjPtr obj,
void *opaque)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr table = NULL;
int ret = -1;
size_t i;
virThread thread;
struct qemuProcessReconnectData *src = opaque;
struct qemuProcessReconnectData *data;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
table = qemuMonitorGetBlockInfo(priv->mon);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto cleanup;
}
/* If the VM was inactive, we don't need to reconnect */
if (!obj->pid)
return 0;
if (!table)
goto cleanup;
if (VIR_ALLOC(data) < 0)
return -1;
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDefPtr disk = vm->def->disks[i];
qemuDomainDiskPrivatePtr diskpriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
struct qemuDomainDiskInfo *info;
memcpy(data, src, sizeof(*data));
data->obj = obj;
if (!(info = virHashLookup(table, disk->info.alias)))
continue;
/* this lock and reference will be eventually transferred to the thread
* that handles the reconnect */
virObjectLock(obj);
virObjectRef(obj);
if (info->removable) {
if (info->empty)
virDomainDiskEmptySource(disk);
/* Since we close the connection later on, we have to make sure that the
* threads we start see a valid connection throughout their lifetime. We
* simply increase the reference counter here.
*/
virObjectRef(data->conn);
if (info->tray) {
if (info->tray_open)
disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
else
disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;
}
}
if (virThreadCreate(&thread, false, qemuProcessReconnect, data) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Could not create thread. QEMU initialization "
"might be incomplete"));
/* We can't spawn a thread and thus connect to monitor. Kill qemu.
* It's safe to call qemuProcessStop without a job here since there
* is no thread that could be doing anything else with the same domain
* object.
*/
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
QEMU_ASYNC_JOB_NONE, 0);
qemuDomainRemoveInactive(src->driver, obj);
/* fill in additional data */
diskpriv->removable = info->removable;
diskpriv->tray = info->tray;
virDomainObjEndAPI(&obj);
virObjectUnref(data->conn);
VIR_FREE(data);
return -1;
}
ret = 0;
return 0;
}
cleanup:
virHashFree(table);
return ret;
/**
* qemuProcessReconnectAll
*
* Try to re-open the resources for live VMs that we care
* about.
*/
void
qemuProcessReconnectAll(virConnectPtr conn, virQEMUDriverPtr driver)
{
struct qemuProcessReconnectData data = {.conn = conn, .driver = driver};
virDomainObjListForEach(driver->domains, qemuProcessReconnectHelper, &data);
}