提交 d1eac927 编写于 作者: J John Ferlan

qemu: Implement the ability to return IOThread stats

Process the IOThreads polling stats if available. Generate the
output params record to be returned to the caller with the three
values - poll-max-ns, poll-grow, and poll-shrink.
Signed-off-by: John Ferlan <jferlan@redhat.com>
Acked-by: Michal Privoznik <mprivozn@redhat.com>
上级 212dc928
......@@ -2050,6 +2050,7 @@ typedef enum {
VIR_DOMAIN_STATS_INTERFACE = (1 << 4), /* return domain interfaces info */
VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
VIR_DOMAIN_STATS_PERF = (1 << 6), /* return domain perf event info */
VIR_DOMAIN_STATS_IOTHREAD = (1 << 7), /* return iothread poll info */
} virDomainStatsTypes;
typedef enum {
......
......@@ -11499,6 +11499,44 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
* long long. It is produced by the
* emulation_faults perf event
*
* VIR_DOMAIN_STATS_IOTHREAD:
* Return IOThread statistics if available. IOThread polling is a
* timing mechanism that allows the hypervisor to generate a longer
* period of time in which the guest will perform operations on the
* CPU being used by the IOThread. The higher the value for poll-max-ns
* the longer the guest will keep the CPU. This may affect other host
* threads using the CPU. The poll-grow and poll-shrink values allow
* the hypervisor to generate a mechanism to add or remove polling time
* within the confines of 0 and poll-max-ns. For QEMU, the poll-grow is
* multiplied by the polling interval, while poll-shrink is used as a
* divisor. When not provided, QEMU may double the polling time until
* poll-max-ns is reached. When poll-shrink is 0 (zero) QEMU may reset
* the polling interval to 0 until it finds its "sweet spot". Setting
* poll-grow too large may cause frequent fluctuation of the time; however,
* this can be tempered by a high poll-shrink to reduce the polling
* interval. For example, a poll-grow of 3 will triple the polling time
* which could quickly exceed poll-max-ns; however, a poll-shrink of
* 10 would cut that polling time more gradually.
*
* The typed parameter keys are in this format:
*
* "iothread.cnt" - maximum number of IOThreads in the subsequent list
as unsigned int. Each IOThread in the list will
use its iothread_id value as the <id>. There
* may be fewer <id> entries than the iothread.cnt
* value if the polling values are not supported.
* "iothread.<id>.poll-max-ns" - maximum polling time in ns as an unsigned
* long long. A 0 (zero) means polling is
* disabled.
* "iothread.<id>.poll-grow" - polling time factor as an unsigned int.
* A 0 (zero) indicates to allow the underlying
* hypervisor to choose how to grow the
* polling time.
* "iothread.<id>.poll-shrink" - polling time divisor as an unsigned int.
* A 0 (zero) indicates to allow the underlying
* hypervisor to choose how to shrink the
* polling time.
*
* Note that entire stats groups or individual stat fields may be missing from
* the output in case they are not supported by the given hypervisor, are not
* applicable for the current state of the guest domain, or their retrieval
......
......@@ -20444,6 +20444,86 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
#undef QEMU_ADD_NAME_PARAM
/* QEMU_ADD_IOTHREAD_PARAM_UI:
 * Build the typed-parameter key "iothread.<id>.<name>" and append it to
 * @record as an unsigned int holding @value.  On failure of the add, jumps
 * to the "cleanup" label of the enclosing function, so this macro may only
 * be used inside a function that defines such a label.
 * NOTE(review): snprintf silently truncates keys longer than
 * VIR_TYPED_PARAM_FIELD_LENGTH — presumably never hit for "iothread.%u.%s";
 * confirm against the field-length limit.
 */
#define QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams, id, name, value) \
do { \
char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
"iothread.%u.%s", id, name); \
if (virTypedParamsAddUInt(&(record)->params, \
&(record)->nparams, \
maxparams, \
param_name, \
value) < 0) \
goto cleanup; \
} while (0)
/* QEMU_ADD_IOTHREAD_PARAM_ULL:
 * Build the typed-parameter key "iothread.<id>.<name>" and append it to
 * @record as an unsigned long long holding @value.  On failure of the add,
 * jumps to the "cleanup" label of the enclosing function, so this macro may
 * only be used inside a function that defines such a label.
 */
#define QEMU_ADD_IOTHREAD_PARAM_ULL(record, maxparams, id, name, value) \
do { \
char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
"iothread.%u.%s", id, name); \
if (virTypedParamsAddULLong(&(record)->params, \
&(record)->nparams, \
maxparams, \
param_name, \
value) < 0) \
goto cleanup; \
} while (0)
/**
 * qemuDomainGetStatsIOThread:
 * @driver: QEMU driver
 * @dom: domain object to query
 * @record: stats record to append typed parameters to
 * @maxparams: in/out capacity of @record's parameter array
 * @privflags: unused
 *
 * Fetch the domain's IOThread list from the monitor and, for each IOThread
 * that reported valid polling data, append the "iothread.<id>.poll-max-ns",
 * ".poll-grow" and ".poll-shrink" typed parameters plus the leading
 * "iothread.cnt" count parameter.
 *
 * Returns 0 on success (including "domain inactive" and "no IOThreads"),
 * -1 on failure.  Always frees the monitor-returned IOThread array.
 */
static int
qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
                           virDomainObjPtr dom,
                           virDomainStatsRecordPtr record,
                           int *maxparams,
                           unsigned int privflags ATTRIBUTE_UNUSED)
{
    size_t i;
    qemuMonitorIOThreadInfoPtr *iothreads = NULL;
    int niothreads = 0;
    int ret = -1;

    if (!virDomainObjIsActive(dom))
        return 0;

    if ((niothreads = qemuDomainGetIOThreadsMon(driver, dom, &iothreads)) < 0)
        return -1;

    /* NOTE(review): go through cleanup rather than returning directly —
     * the monitor helper may hand back a non-NULL (zero-length) array even
     * when it reports no IOThreads, which a bare "return 0" would leak.
     * The cleanup path is a no-op when @iothreads is NULL, so this is safe
     * either way. */
    if (niothreads == 0) {
        ret = 0;
        goto cleanup;
    }

    QEMU_ADD_COUNT_PARAM(record, maxparams, "iothread", niothreads);

    for (i = 0; i < (size_t)niothreads; i++) {
        /* Entries without polling data are skipped, so there may be fewer
         * "iothread.<id>.*" groups than "iothread.cnt" advertises. */
        if (!iothreads[i]->poll_valid)
            continue;

        QEMU_ADD_IOTHREAD_PARAM_ULL(record, maxparams,
                                    iothreads[i]->iothread_id,
                                    "poll-max-ns",
                                    iothreads[i]->poll_max_ns);
        QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams,
                                   iothreads[i]->iothread_id,
                                   "poll-grow",
                                   iothreads[i]->poll_grow);
        QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams,
                                   iothreads[i]->iothread_id,
                                   "poll-shrink",
                                   iothreads[i]->poll_shrink);
    }

    ret = 0;

 cleanup:
    for (i = 0; i < (size_t)niothreads; i++)
        VIR_FREE(iothreads[i]);
    VIR_FREE(iothreads);
    return ret;
}
#undef QEMU_ADD_IOTHREAD_PARAM_UI
#undef QEMU_ADD_IOTHREAD_PARAM_ULL
#undef QEMU_ADD_COUNT_PARAM
static int
......@@ -20518,6 +20598,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
{ qemuDomainGetStatsInterface, VIR_DOMAIN_STATS_INTERFACE, false },
{ qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
{ qemuDomainGetStatsPerf, VIR_DOMAIN_STATS_PERF, false },
{ qemuDomainGetStatsIOThread, VIR_DOMAIN_STATS_IOTHREAD, true },
{ NULL, 0, false }
};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册