Commit 2771f8b7 authored by Peter Krempa

qemu: Split out domain memory saving code to allow reuse

The code that saves domain memory by migration to file can be reused
while doing external checkpoints of a machine. This patch extracts the
common code and places it in a separate function.
Parent ec69ca14
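
Before reading the diff: the patch extracts the migrate-to-file portion of qemuDomainSaveInternal() into a new static helper, qemuDomainSaveMemory(), so that qemuDomainSaveInternal() only validates the domain, pauses the CPUs, formats the inactive XML, and then delegates the actual memory dump. A condensed C sketch of the resulting call structure, taken from the signatures visible in the diff below (the surrounding types and helpers are libvirt internals as of this commit):

static int
qemuDomainSaveMemory(struct qemud_driver *driver,
                     virDomainObjPtr vm,
                     const char *path,          /* destination save file */
                     const char *xml,           /* inactive domain XML written after the header */
                     int compressed,            /* compression selector for qemuCompressProgramName() */
                     bool was_running,          /* recorded in the save header */
                     unsigned int flags,
                     enum qemuDomainAsyncJob asyncJob);

/* qemuDomainSaveInternal() now performs the save with a single call,
 * passing QEMU_ASYNC_JOB_SAVE as the async job: */
ret = qemuDomainSaveMemory(driver, vm, path, xml, compressed,
                           was_running, flags, QEMU_ASYNC_JOB_SAVE);
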
@@ -2608,7 +2608,7 @@ bswap_header(struct qemud_save_header *hdr) {
/* return -errno on failure, or 0 on success */
static int
qemuDomainSaveHeader(int fd, const char *path, char *xml,
qemuDomainSaveHeader(int fd, const char *path, const char *xml,
struct qemud_save_header *header)
{
int ret = 0;
@@ -2755,103 +2755,38 @@ cleanup:
return fd;
}
/* This internal function expects the driver lock to already be held on
* entry and the vm must be active + locked. Vm will be unlocked and
* potentially free'd after this returns (eg transient VMs are freed
* shutdown). So 'vm' must not be referenced by the caller after
* this returns (whether returning success or failure).
*/
/* Helper function to execute a migration to file with a correct save header
* the caller needs to make sure that the processors are stopped and do all other
* actions besides saving memory */
static int
qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
virDomainObjPtr vm, const char *path,
int compressed, const char *xmlin, unsigned int flags)
qemuDomainSaveMemory(struct qemud_driver *driver,
virDomainObjPtr vm,
const char *path,
const char *xml,
int compressed,
bool was_running,
unsigned int flags,
enum qemuDomainAsyncJob asyncJob)
{
char *xml = NULL;
struct qemud_save_header header;
bool bypassSecurityDriver = false;
int ret = -1;
int rc;
virDomainEventPtr event = NULL;
qemuDomainObjPrivatePtr priv;
bool needUnlink = false;
size_t len;
unsigned long long offset;
unsigned long long pad;
int ret = -1;
int fd = -1;
int directFlag = 0;
virFileWrapperFdPtr wrapperFd = NULL;
unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING;
if (qemuProcessAutoDestroyActive(driver, vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is marked for auto destroy"));
goto cleanup;
}
if (virDomainHasDiskMirror(vm)) {
virReportError(VIR_ERR_BLOCK_COPY_ACTIVE, "%s",
_("domain has active block copy job"));
goto cleanup;
}
unsigned long long pad;
unsigned long long offset;
size_t len;
memset(&header, 0, sizeof(header));
memcpy(header.magic, QEMUD_SAVE_PARTIAL, sizeof(header.magic));
header.version = QEMUD_SAVE_VERSION;
header.was_running = was_running ? 1 : 0;
header.compressed = compressed;
priv = vm->privateData;
if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
QEMU_ASYNC_JOB_SAVE) < 0)
goto cleanup;
memset(&priv->job.info, 0, sizeof(priv->job.info));
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
/* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
header.was_running = 1;
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
goto endjob;
}
}
/* libvirt.c already guaranteed these two flags are exclusive. */
if (flags & VIR_DOMAIN_SAVE_RUNNING)
header.was_running = 1;
else if (flags & VIR_DOMAIN_SAVE_PAUSED)
header.was_running = 0;
/* Get XML for the domain. Restore needs only the inactive xml,
* including secure. We should get the same result whether xmlin
* is NULL or whether it was the live xml of the domain moments
* before. */
if (xmlin) {
virDomainDefPtr def = NULL;
if (!(def = virDomainDefParseString(driver->caps, xmlin,
QEMU_EXPECTED_VIRT_TYPES,
VIR_DOMAIN_XML_INACTIVE))) {
goto endjob;
}
if (!virDomainDefCheckABIStability(vm->def, def)) {
virDomainDefFree(def);
goto endjob;
}
xml = qemuDomainDefFormatLive(driver, def, true, true);
} else {
xml = qemuDomainDefFormatLive(driver, vm->def, true, true);
}
if (!xml) {
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to get domain xml"));
goto endjob;
}
len = strlen(xml) + 1;
offset = sizeof(header) + len;
@@ -2868,7 +2803,7 @@ qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
((offset + pad) % QEMU_MONITOR_MIGRATE_TO_FILE_BS));
if (VIR_EXPAND_N(xml, len, pad) < 0) {
virReportOOMError();
goto endjob;
goto cleanup;
}
offset += pad;
header.xml_len = len;
@@ -2886,22 +2821,21 @@ qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
fd = qemuOpenFile(driver, path, O_WRONLY | O_TRUNC | O_CREAT | directFlag,
&needUnlink, &bypassSecurityDriver);
if (fd < 0)
goto endjob;
goto cleanup;
if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags)))
goto endjob;
goto cleanup;
/* Write header to file, followed by XML */
if (qemuDomainSaveHeader(fd, path, xml, &header) < 0) {
VIR_FORCE_CLOSE(fd);
goto endjob;
}
if (qemuDomainSaveHeader(fd, path, xml, &header) < 0)
goto cleanup;
/* Perform the migration */
if (qemuMigrationToFile(driver, vm, fd, offset, path,
qemuCompressProgramName(compressed),
bypassSecurityDriver,
QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob;
asyncJob) < 0)
goto cleanup;
/* Touch up file header to mark image complete. */
@@ -2911,26 +2845,126 @@ qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
* that's acceptable. */
if (VIR_CLOSE(fd) < 0) {
virReportSystemError(errno, _("unable to close %s"), path);
goto endjob;
goto cleanup;
}
if (virFileWrapperFdClose(wrapperFd) < 0)
goto endjob;
fd = qemuOpenFile(driver, path, O_WRONLY, NULL, NULL);
if (fd < 0)
goto endjob;
goto cleanup;
if ((fd = qemuOpenFile(driver, path, O_WRONLY, NULL, NULL)) < 0)
goto cleanup;
memcpy(header.magic, QEMUD_SAVE_MAGIC, sizeof(header.magic));
if (safewrite(fd, &header, sizeof(header)) != sizeof(header)) {
virReportSystemError(errno, _("unable to write %s"), path);
goto endjob;
goto cleanup;
}
if (VIR_CLOSE(fd) < 0) {
virReportSystemError(errno, _("unable to close %s"), path);
goto endjob;
goto cleanup;
}
ret = 0;
cleanup:
VIR_FORCE_CLOSE(fd);
virFileWrapperFdCatchError(wrapperFd);
virFileWrapperFdFree(wrapperFd);
if (ret != 0 && needUnlink)
unlink(path);
return ret;
}
/* This internal function expects the driver lock to already be held on
* entry and the vm must be active + locked. Vm will be unlocked and
* potentially free'd after this returns (eg transient VMs are freed
* shutdown). So 'vm' must not be referenced by the caller after
* this returns (whether returning success or failure).
*/
static int
qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
virDomainObjPtr vm, const char *path,
int compressed, const char *xmlin, unsigned int flags)
{
char *xml = NULL;
bool was_running = false;
int ret = -1;
int rc;
virDomainEventPtr event = NULL;
qemuDomainObjPrivatePtr priv = vm->privateData;
if (qemuProcessAutoDestroyActive(driver, vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is marked for auto destroy"));
goto cleanup;
}
if (virDomainHasDiskMirror(vm)) {
virReportError(VIR_ERR_BLOCK_COPY_ACTIVE, "%s",
_("domain has active block copy job"));
goto cleanup;
}
if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
QEMU_ASYNC_JOB_SAVE) < 0)
memset(&priv->job.info, 0, sizeof(priv->job.info));
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
/* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
was_running = true;
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
QEMU_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
goto endjob;
}
}
/* libvirt.c already guaranteed these two flags are exclusive. */
if (flags & VIR_DOMAIN_SAVE_RUNNING)
was_running = true;
else if (flags & VIR_DOMAIN_SAVE_PAUSED)
was_running = false;
/* Get XML for the domain. Restore needs only the inactive xml,
* including secure. We should get the same result whether xmlin
* is NULL or whether it was the live xml of the domain moments
* before. */
if (xmlin) {
virDomainDefPtr def = NULL;
if (!(def = virDomainDefParseString(driver->caps, xmlin,
QEMU_EXPECTED_VIRT_TYPES,
VIR_DOMAIN_XML_INACTIVE))) {
goto endjob;
}
if (!virDomainDefCheckABIStability(vm->def, def)) {
virDomainDefFree(def);
goto endjob;
}
xml = qemuDomainDefFormatLive(driver, def, true, true);
} else {
xml = qemuDomainDefFormatLive(driver, vm->def, true, true);
}
if (!xml) {
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to get domain xml"));
goto endjob;
}
ret = qemuDomainSaveMemory(driver, vm, path, xml, compressed,
was_running, flags, QEMU_ASYNC_JOB_SAVE);
if (ret < 0)
goto endjob;
/* Shut it down */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_SAVED, 0);
virDomainAuditStop(vm, "saved");
@@ -2946,25 +2980,20 @@ qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
endjob:
if (vm) {
if (ret != 0) {
if (header.was_running && virDomainObjIsActive(vm)) {
if (was_running && virDomainObjIsActive(vm)) {
rc = qemuProcessStartCPUs(driver, vm, dom->conn,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
QEMU_ASYNC_JOB_SAVE);
if (rc < 0)
VIR_WARN("Unable to resume guest CPUs after save failure");
}
virFileWrapperFdCatchError(wrapperFd);
}
if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
vm = NULL;
}
cleanup:
VIR_FORCE_CLOSE(fd);
virFileWrapperFdFree(wrapperFd);
VIR_FREE(xml);
if (ret != 0 && needUnlink)
unlink(path);
if (event)
qemuDomainEventQueue(driver, event);
if (vm)
......
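
The motivation stated in the commit message is reuse of this helper from external checkpoint code. A hypothetical caller along those lines (not part of this patch; the async job constant, variable names, and surrounding labels here are assumptions for illustration) could drive the same helper under a different async job:

/* Hypothetical external-checkpoint path (illustration only): dump guest
 * memory to a checkpoint file while a snapshot async job is active. */
if (qemuDomainSaveMemory(driver, vm, memoryFile, xml, compressed,
                         true /* was_running */, 0 /* flags */,
                         QEMU_ASYNC_JOB_SNAPSHOT) < 0)
    goto cleanup;
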