diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index 1f58832ebb5a7f9c8e4b0571726304b4d5d78790..c44c61732ccc3d3ef1641b78a8acaf930052665d 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -74,16 +74,17 @@ virCgroupForDriver; virCgroupForVcpu; virCgroupFree; virCgroupGetBlkioWeight; -virCgroupGetCpuShares; virCgroupGetCpuCfsPeriod; virCgroupGetCpuCfsQuota; +virCgroupGetCpuShares; +virCgroupGetCpuacctPercpuUsage; virCgroupGetCpuacctUsage; virCgroupGetCpusetMems; virCgroupGetFreezerState; +virCgroupGetMemSwapHardLimit; virCgroupGetMemoryHardLimit; virCgroupGetMemorySoftLimit; virCgroupGetMemoryUsage; -virCgroupGetMemSwapHardLimit; virCgroupKill; virCgroupKillPainfully; virCgroupKillRecursive; @@ -92,15 +93,15 @@ virCgroupPathOfController; virCgroupRemove; virCgroupSetBlkioDeviceWeight; virCgroupSetBlkioWeight; -virCgroupSetCpuShares; virCgroupSetCpuCfsPeriod; virCgroupSetCpuCfsQuota; +virCgroupSetCpuShares; virCgroupSetCpusetMems; virCgroupSetFreezerState; +virCgroupSetMemSwapHardLimit; virCgroupSetMemory; virCgroupSetMemoryHardLimit; virCgroupSetMemorySoftLimit; -virCgroupSetMemSwapHardLimit; # command.h diff --git a/src/qemu/qemu.conf b/src/qemu/qemu.conf index 95428c1968ce3fc4ac02342e2ccc6d75b077af1f..cb877281920c97448ffbfddf40cab70221ceeed0 100644 --- a/src/qemu/qemu.conf +++ b/src/qemu/qemu.conf @@ -166,6 +166,7 @@ # - 'memory' - use for memory tunables # - 'blkio' - use for block devices I/O tunables # - 'cpuset' - use for CPUs and memory nodes +# - 'cpuacct' - use for CPUs statistics. # # NB, even if configured here, they won't be used unless # the administrator has mounted cgroups, e.g.: @@ -177,7 +178,7 @@ # can be mounted in different locations. libvirt will detect # where they are located. 
# -# cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset" ] +# cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ] # This is the basic set of devices allowed / required by # all virtual machines. diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c index e95c7a55e043927c92d4391b5916ce0bde398b92..a709cbf1253a20b3f3d95b86be2d983b461b95a8 100644 --- a/src/qemu/qemu_conf.c +++ b/src/qemu/qemu_conf.c @@ -318,7 +318,8 @@ int qemudLoadDriverConfig(struct qemud_driver *driver, (1 << VIR_CGROUP_CONTROLLER_DEVICES) | (1 << VIR_CGROUP_CONTROLLER_MEMORY) | (1 << VIR_CGROUP_CONTROLLER_BLKIO) | - (1 << VIR_CGROUP_CONTROLLER_CPUSET); + (1 << VIR_CGROUP_CONTROLLER_CPUSET) | + (1 << VIR_CGROUP_CONTROLLER_CPUACCT); } for (i = 0 ; i < VIR_CGROUP_CONTROLLER_LAST ; i++) { if (driver->cgroupControllers & (1 << i)) { diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 733df0a57ccc4516e032f648dd4797bede76505b..538a4190ce5325bbe137e4fb58def36a840fd12c 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -12095,6 +12095,158 @@ cleanup: return ret; } +/* qemuDomainGetCPUStats() with start_cpu == -1 */ +static int +qemuDomainGetTotalcpuStats(virCgroupPtr group, + virTypedParameterPtr params, + int nparams) +{ + unsigned long long cpu_time; + int param_idx = 0; + int ret; + + if (nparams == 0) /* return supported number of params */ + return 1; + /* entry 0 is cputime */ + ret = virCgroupGetCpuacctUsage(group, &cpu_time); + if (ret < 0) { + virReportSystemError(-ret, "%s", _("unable to get cpu account")); + return -1; + } + + virTypedParameterAssign(¶ms[param_idx], VIR_DOMAIN_CPU_STATS_CPUTIME, + VIR_TYPED_PARAM_ULLONG, cpu_time); + return 1; +} + +static int +qemuDomainGetPercpuStats(virDomainPtr domain, + virCgroupPtr group, + virTypedParameterPtr params, + unsigned int nparams, + int start_cpu, + unsigned int ncpus) +{ + char *map = NULL; + int rv = -1; + int i, max_id; + char *pos; + char *buf = NULL; + 
virTypedParameterPtr ent; + int param_idx; + + /* return the number of supported params */ + if (nparams == 0 && ncpus != 0) + return 1; /* only cpu_time is supported */ + + /* return percpu cputime in index 0 */ + param_idx = 0; + /* to parse account file, we need "present" cpu map */ + map = nodeGetCPUmap(domain->conn, &max_id, "present"); + if (!map) + return rv; + + if (ncpus == 0) { /* returns max cpu ID */ + rv = max_id + 1; + goto cleanup; + } + + if (start_cpu > max_id) { + qemuReportError(VIR_ERR_INVALID_ARG, + _("start_cpu %d larger than maximum of %d"), + start_cpu, max_id); + goto cleanup; + } + + /* we get percpu cputime accounting info. */ + if (virCgroupGetCpuacctPercpuUsage(group, &buf)) + goto cleanup; + pos = buf; + + if (max_id - start_cpu > ncpus - 1) + max_id = start_cpu + ncpus - 1; + + for (i = 0; i <= max_id; i++) { + unsigned long long cpu_time; + + if (!map[i]) { + cpu_time = 0; + } else if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("cpuacct parse error")); + goto cleanup; + } + if (i < start_cpu) + continue; + ent = &params[ (i - start_cpu) * nparams + param_idx]; + virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME, + VIR_TYPED_PARAM_ULLONG, cpu_time); + } + rv = param_idx + 1; +cleanup: + VIR_FREE(buf); + VIR_FREE(map); + return rv; +} + + +static int +qemuDomainGetCPUStats(virDomainPtr domain, + virTypedParameterPtr params, + unsigned int nparams, + int start_cpu, + unsigned int ncpus, + unsigned int flags) +{ + struct qemud_driver *driver = domain->conn->privateData; + virCgroupPtr group = NULL; + virDomainObjPtr vm = NULL; + int ret = -1; + bool isActive; + char uuidstr[VIR_UUID_STRING_BUFLEN]; + + virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); + + qemuDriverLock(driver); + + vm = virDomainFindByUUID(&driver->domains, domain->uuid); + if (vm == NULL) { + virUUIDFormat(domain->uuid, uuidstr); + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching uuid '%s'"), uuidstr); + goto cleanup; + } + + isActive = virDomainObjIsActive(vm); + if (!isActive) {
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto cleanup; + } + + if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUACCT)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("cgroup CPUACCT controller is not mounted")); + goto cleanup; + } + + if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("cannot find cgroup for domain %s"), vm->def->name); + goto cleanup; + } + + if (start_cpu == -1) + ret = qemuDomainGetTotalcpuStats(group, params, nparams); + else + ret = qemuDomainGetPercpuStats(domain, group, params, nparams, + start_cpu, ncpus); +cleanup: + virCgroupFree(&group); + if (vm) + virDomainObjUnlock(vm); + qemuDriverUnlock(driver); + return ret; +} + static int qemuDomainPMSuspendForDuration(virDomainPtr dom, unsigned int target, @@ -12395,6 +12547,7 @@ static virDriver qemuDriver = { .domainGetMetadata = qemuDomainGetMetadata, /* 0.9.10 */ .domainPMSuspendForDuration = qemuDomainPMSuspendForDuration, /* 0.9.11 */ .domainPMWakeup = qemuDomainPMWakeup, /* 0.9.11 */ + .domainGetCPUStats = qemuDomainGetCPUStats, /* 0.9.11 */ }; diff --git a/src/util/cgroup.c b/src/util/cgroup.c index 00528c522547ad85cc9a4079709cb0aa60094f09..c150fbb8b2b18aadb0ca35050162673761648968 100644 --- a/src/util/cgroup.c +++ b/src/util/cgroup.c @@ -1555,6 +1555,12 @@ int virCgroupGetCpuacctUsage(virCgroupPtr group, unsigned long long *usage) "cpuacct.usage", usage); } +int virCgroupGetCpuacctPercpuUsage(virCgroupPtr group, char **usage) +{ + return virCgroupGetValueStr(group, VIR_CGROUP_CONTROLLER_CPUACCT, + "cpuacct.usage_percpu", usage); +} + int virCgroupSetFreezerState(virCgroupPtr group, const char *state) { return virCgroupSetValueStr(group, diff --git a/src/util/cgroup.h b/src/util/cgroup.h index 8d757350c3a0c00ec6415ed63e662f38355093d8..b4e0f373d8b4a1b2579f704064fd06a5e49d3e1f 100644 --- a/src/util/cgroup.h +++ b/src/util/cgroup.h @@ -115,6 +115,7 @@ int 
virCgroupSetCpuCfsQuota(virCgroupPtr group, long long cfs_quota); int virCgroupGetCpuCfsQuota(virCgroupPtr group, long long *cfs_quota); int virCgroupGetCpuacctUsage(virCgroupPtr group, unsigned long long *usage); +int virCgroupGetCpuacctPercpuUsage(virCgroupPtr group, char **usage); int virCgroupSetFreezerState(virCgroupPtr group, const char *state); int virCgroupGetFreezerState(virCgroupPtr group, char **state);