Commit 2a978269 authored by Jiri Denemark

qemu: Report VIR_DOMAIN_JOB_OPERATION

Not all async jobs are visible via virDomainGetJobStats (either they are
too fast or getting the stats is not allowed during the job), but
forcing all of them to advertise the operation is easier than hunting
the jobs for which fetching statistics is allowed. And we won't need to
think about this when we add support for getting stats for more jobs.

https://bugzilla.redhat.com/show_bug.cgi?id=1441563

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Parent b1c79d78
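With the operation recorded for every async job, a management client can read it back from virDomainGetJobStats as the VIR_DOMAIN_JOB_OPERATION typed parameter. A minimal client-side sketch (not part of this commit; the printJobOperation helper is hypothetical and error handling is abbreviated):

#include <stdio.h>
#include <libvirt/libvirt.h>

/* Print which operation (start, save, migration, ...) owns the domain's
 * currently running job.  Assumes `dom` is a valid virDomainPtr obtained
 * elsewhere. */
static void
printJobOperation(virDomainPtr dom)
{
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int type;
    int op;

    if (virDomainGetJobStats(dom, &type, &params, &nparams, 0) < 0)
        return;

    /* VIR_DOMAIN_JOB_OPERATION is the parameter filled in by
     * qemuDomainJobInfoToParams() in the diff below; its value is one of
     * the virDomainJobOperation constants (START, SAVE, MIGRATION_OUT, ...). */
    if (virTypedParamsGetInt(params, nparams,
                             VIR_DOMAIN_JOB_OPERATION, &op) == 1)
        printf("job type %d, operation %d\n", type, op);

    virTypedParamsFree(params, nparams);
}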
@@ -436,6 +436,11 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
     int maxpar = 0;
     int npar = 0;
 
+    if (virTypedParamsAddInt(&par, &npar, &maxpar,
+                             VIR_DOMAIN_JOB_OPERATION,
+                             jobInfo->operation) < 0)
+        goto error;
+
     if (virTypedParamsAddULLong(&par, &npar, &maxpar,
                                 VIR_DOMAIN_JOB_TIME_ELAPSED,
                                 jobInfo->timeElapsed) < 0)
@@ -3736,13 +3741,18 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
-                               qemuDomainAsyncJob asyncJob)
+                               qemuDomainAsyncJob asyncJob,
+                               virDomainJobOperation operation)
 {
+    qemuDomainObjPrivatePtr priv;
+
     if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
                                       asyncJob) < 0)
         return -1;
-    else
-        return 0;
+
+    priv = obj->privateData;
+    priv->job.current->operation = operation;
+    return 0;
 }
 
 int
@@ -103,6 +103,7 @@ typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
 typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
 struct _qemuDomainJobInfo {
     virDomainJobType type;
+    virDomainJobOperation operation;
     unsigned long long started; /* When the async job started */
     unsigned long long stopped; /* When the domain's CPUs were stopped */
     unsigned long long sent; /* When the source sent status info to the
@@ -433,7 +434,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
-                               qemuDomainAsyncJob asyncJob)
+                               qemuDomainAsyncJob asyncJob,
+                               virDomainJobOperation operation)
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,
@@ -268,7 +268,8 @@ qemuAutostartDomain(virDomainObjPtr vm,
     virResetLastError();
 
     if (vm->autostart &&
         !virDomainObjIsActive(vm)) {
-        if (qemuProcessBeginJob(data->driver, vm) < 0) {
+        if (qemuProcessBeginJob(data->driver, vm,
+                                VIR_DOMAIN_JOB_OPERATION_START) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Failed to start job on VM '%s': %s"),
                            vm->def->name, virGetLastErrorMessage());
@@ -1761,7 +1762,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virObjectRef(vm);
     def = NULL;
 
-    if (qemuProcessBeginJob(driver, vm) < 0) {
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0) {
         qemuDomainRemoveInactive(driver, vm);
         goto cleanup;
     }
@@ -3147,7 +3148,8 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     if (!qemuMigrationIsAllowed(driver, vm, false, 0))
         goto cleanup;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
+                                   VIR_DOMAIN_JOB_OPERATION_SAVE) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -3685,7 +3687,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
         goto cleanup;
 
     if (qemuDomainObjBeginAsyncJob(driver, vm,
-                                   QEMU_ASYNC_JOB_DUMP) < 0)
+                                   QEMU_ASYNC_JOB_DUMP,
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -3907,7 +3910,8 @@ processWatchdogEvent(virQEMUDriverPtr driver,
     switch (action) {
     case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
         if (qemuDomainObjBeginAsyncJob(driver, vm,
-                                       QEMU_ASYNC_JOB_DUMP) < 0) {
+                                       QEMU_ASYNC_JOB_DUMP,
+                                       VIR_DOMAIN_JOB_OPERATION_DUMP) < 0) {
             goto cleanup;
         }
@@ -3994,7 +3998,8 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     bool removeInactive = false;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -6479,7 +6484,7 @@ qemuDomainRestoreFlags(virConnectPtr conn,
         priv->hookRun = true;
     }
 
-    if (qemuProcessBeginJob(driver, vm) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE) < 0)
        goto cleanup;
 
     ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path,
@@ -6899,6 +6904,7 @@ qemuDomainObjStart(virConnectPtr conn,
     bool bypass_cache = (flags & VIR_DOMAIN_START_BYPASS_CACHE) != 0;
     bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0;
     unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
 
     start_flags |= start_paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
     start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0;
@@ -6922,6 +6928,9 @@ qemuDomainObjStart(virConnectPtr conn,
             }
             vm->hasManagedSave = false;
         } else {
+            virDomainJobOperation op = priv->job.current->operation;
+            priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE;
+
             ret = qemuDomainObjRestore(conn, driver, vm, managed_save,
                                        start_paused, bypass_cache, asyncJob);
@@ -6938,6 +6947,7 @@ qemuDomainObjStart(virConnectPtr conn,
                 goto cleanup;
             } else {
                 VIR_WARN("Ignoring incomplete managed state %s", managed_save);
+                priv->job.current->operation = op;
             }
         }
     }
@@ -6987,7 +6997,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
     if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuProcessBeginJob(driver, vm) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0)
         goto cleanup;
 
     if (virDomainObjIsActive(vm)) {
@@ -14556,7 +14566,8 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
      * a regular job, so we need to set the job mask to disallow query as
      * 'savevm' blocks the monitor. External snapshot will then modify the
      * job mask appropriately. */
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
+                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT) < 0)
         goto cleanup;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
@@ -15146,7 +15157,8 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
         goto cleanup;
     }
 
-    if (qemuProcessBeginJob(driver, vm) < 0)
+    if (qemuProcessBeginJob(driver, vm,
+                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT) < 0)
         goto cleanup;
 
     if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
@@ -5629,18 +5629,23 @@ qemuMigrationJobStart(virQEMUDriverPtr driver,
                       qemuDomainAsyncJob job)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainJobOperation op;
+    unsigned long long mask;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, job) < 0)
-        return -1;
-
     if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
-        qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
+        mask = QEMU_JOB_NONE;
     } else {
-        qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
-                                          JOB_MASK(QEMU_JOB_SUSPEND) |
-                                          JOB_MASK(QEMU_JOB_MIGRATION_OP)));
+        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
+        mask = QEMU_JOB_DEFAULT_MASK |
+               JOB_MASK(QEMU_JOB_SUSPEND) |
+               JOB_MASK(QEMU_JOB_MIGRATION_OP);
     }
 
+    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op) < 0)
+        return -1;
+
+    qemuDomainObjSetAsyncJobMask(vm, mask);
     priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED;
 
     return 0;
@@ -4143,11 +4143,13 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
  */
 int
 qemuProcessBeginJob(virQEMUDriverPtr driver,
-                    virDomainObjPtr vm)
+                    virDomainObjPtr vm,
+                    virDomainJobOperation operation)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
+                                   operation) < 0)
         return -1;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
@@ -59,7 +59,8 @@ qemuProcessIncomingDefPtr qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
 void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc);
 
 int qemuProcessBeginJob(virQEMUDriverPtr driver,
-                        virDomainObjPtr vm);
+                        virDomainObjPtr vm,
+                        virDomainJobOperation operation);
 void qemuProcessEndJob(virQEMUDriverPtr driver,
                        virDomainObjPtr vm);