diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 9d0e7531907e98005d074b99c9b4f4ea69ae1c59..6b9eded51c70951601fd22e5f98e021fa272ec48 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -82,6 +82,7 @@ virCgroupGetCpuShares;
 virCgroupGetCpuacctPercpuUsage;
 virCgroupGetCpuacctStat;
 virCgroupGetCpuacctUsage;
+virCgroupGetCpusetCpus;
 virCgroupGetCpusetMems;
 virCgroupGetFreezerState;
 virCgroupGetMemSwapHardLimit;
@@ -100,6 +101,7 @@ virCgroupSetBlkioWeight;
 virCgroupSetCpuCfsPeriod;
 virCgroupSetCpuCfsQuota;
 virCgroupSetCpuShares;
+virCgroupSetCpusetCpus;
 virCgroupSetCpusetMems;
 virCgroupSetFreezerState;
 virCgroupSetMemSwapHardLimit;
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index 8a5a5365b84c21d44f3aa8bd157bd4836f1fed77..37874d33ed8e24e596caa8769ece0c046b559ce7 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -491,11 +491,45 @@ cleanup:
     return -1;
 }
 
+int qemuSetupCgroupVcpuPin(virCgroupPtr cgroup,
+                           virDomainVcpuPinDefPtr *vcpupin,
+                           int nvcpupin,
+                           int vcpuid)
+{
+    int i, rc = 0;
+    char *new_cpus = NULL;
+
+    for (i = 0; i < nvcpupin; i++) {
+        if (vcpuid == vcpupin[i]->vcpuid) {
+            new_cpus = virDomainCpuSetFormat(vcpupin[i]->cpumask,
+                                             VIR_DOMAIN_CPUMASK_LEN);
+            if (!new_cpus) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("failed to convert cpu mask"));
+                rc = -1;
+                goto cleanup;
+            }
+            rc = virCgroupSetCpusetCpus(cgroup, new_cpus);
+            if (rc != 0) {
+                virReportSystemError(-rc,
+                                     "%s",
+                                     _("Unable to set cpuset.cpus"));
+                goto cleanup;
+            }
+        }
+    }
+
+cleanup:
+    VIR_FREE(new_cpus);
+    return rc;
+}
+
 int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
 {
     virCgroupPtr cgroup = NULL;
     virCgroupPtr cgroup_vcpu = NULL;
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainDefPtr def = vm->def;
     int rc;
     unsigned int i;
     unsigned long long period = vm->def->cputune.period;
@@ -567,6 +601,15 @@ int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
             }
         }
 
+        /* Set vcpupin in cgroup if vcpupin xml is provided */
+        if (def->cputune.nvcpupin &&
+            qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET) &&
+            qemuSetupCgroupVcpuPin(cgroup_vcpu,
+                                   def->cputune.vcpupin,
+                                   def->cputune.nvcpupin,
+                                   i) < 0)
+            goto cleanup;
+
         virCgroupFree(&cgroup_vcpu);
     }
 
diff --git a/src/qemu/qemu_cgroup.h b/src/qemu/qemu_cgroup.h
index 34a93126cb6c4235a79ae7be10e231061320e232..fa93cdb01289479bae33766221806422371628b2 100644
--- a/src/qemu/qemu_cgroup.h
+++ b/src/qemu/qemu_cgroup.h
@@ -53,6 +53,10 @@ int qemuSetupCgroup(struct qemud_driver *driver,
 int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup,
                           unsigned long long period,
                           long long quota);
+int qemuSetupCgroupVcpuPin(virCgroupPtr cgroup,
+                           virDomainVcpuPinDefPtr *vcpupin,
+                           int nvcpupin,
+                           int vcpuid);
 int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm);
 int qemuSetupCgroupForEmulator(struct qemud_driver *driver,
                                virDomainObjPtr vm);
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 0c6be6bbe6db7e85ab4656498432643dca2115c4..5ad12085eb1a83156fd7b601551578fb4f8cca9e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3716,11 +3716,15 @@ qemudDomainPinVcpuFlags(virDomainPtr dom,
     struct qemud_driver *driver = dom->conn->privateData;
     virDomainObjPtr vm;
     virDomainDefPtr persistentDef = NULL;
+    virCgroupPtr cgroup_dom = NULL;
+    virCgroupPtr cgroup_vcpu = NULL;
     int maxcpu, hostcpus;
     virNodeInfo nodeinfo;
     int ret = -1;
     qemuDomainObjPrivatePtr priv;
     bool canResetting = true;
+    int newVcpuPinNum = 0;
+    virDomainVcpuPinDefPtr *newVcpuPin = NULL;
     int pcpu;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
@@ -3769,43 +3773,73 @@ qemudDomainPinVcpuFlags(virDomainPtr dom,
 
     if (flags & VIR_DOMAIN_AFFECT_LIVE) {
 
-        if (priv->vcpupids != NULL) {
-            if (virProcessInfoSetAffinity(priv->vcpupids[vcpu],
-                                          cpumap, maplen, maxcpu) < 0)
-                goto cleanup;
-        } else {
+        if (priv->vcpupids == NULL) {
             virReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("cpu affinity is not supported"));
             goto cleanup;
         }
 
-        if (canResetting) {
-            if (virDomainVcpuPinDel(vm->def, vcpu) < 0) {
-                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                               _("failed to delete vcpupin xml of "
-                                 "a running domain"));
+        if (vm->def->cputune.vcpupin) {
+            newVcpuPin = virDomainVcpuPinDefCopy(vm->def->cputune.vcpupin,
+                                                 vm->def->cputune.nvcpupin);
+            if (!newVcpuPin)
+                goto cleanup;
+
+            newVcpuPinNum = vm->def->cputune.nvcpupin;
+        } else {
+            if (VIR_ALLOC(newVcpuPin) < 0) {
+                virReportOOMError();
+                goto cleanup;
+            }
+            newVcpuPinNum = 0;
+        }
+
+        if (virDomainVcpuPinAdd(newVcpuPin, &newVcpuPinNum, cpumap, maplen, vcpu) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("failed to update vcpupin"));
+            virDomainVcpuPinDefFree(newVcpuPin, newVcpuPinNum);
+            goto cleanup;
+        }
+
+        /* Configure the corresponding cpuset cgroup before setting affinity. */
+        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
+            if (virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup_dom, 0) == 0 &&
+                virCgroupForVcpu(cgroup_dom, vcpu, &cgroup_vcpu, 0) == 0 &&
+                qemuSetupCgroupVcpuPin(cgroup_vcpu, newVcpuPin, newVcpuPinNum, vcpu) < 0) {
+                virReportError(VIR_ERR_OPERATION_INVALID,
+                               _("failed to set cpuset.cpus in cgroup"
+                                 " for vcpu %d"), vcpu);
                 goto cleanup;
             }
         } else {
-            if (!vm->def->cputune.vcpupin) {
-                if (VIR_ALLOC(vm->def->cputune.vcpupin) < 0) {
-                    virReportOOMError();
-                    goto cleanup;
-                }
-                vm->def->cputune.nvcpupin = 0;
+            if (virProcessInfoSetAffinity(priv->vcpupids[vcpu],
+                                          cpumap, maplen, maxcpu) < 0) {
+                virReportError(VIR_ERR_SYSTEM_ERROR,
+                               _("failed to set cpu affinity for vcpu %d"),
+                               vcpu);
+                goto cleanup;
             }
-            if (virDomainVcpuPinAdd(vm->def->cputune.vcpupin,
-                                    &vm->def->cputune.nvcpupin,
-                                    cpumap,
-                                    maplen,
-                                    vcpu) < 0) {
+        }
+
+        if (canResetting) {
+            if (virDomainVcpuPinDel(vm->def, vcpu) < 0) {
                 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                               _("failed to update or add vcpupin xml of "
+                               _("failed to delete vcpupin xml of "
                                  "a running domain"));
                 goto cleanup;
             }
+        } else {
+            if (vm->def->cputune.vcpupin)
+                virDomainVcpuPinDefFree(vm->def->cputune.vcpupin, vm->def->cputune.nvcpupin);
+
+            vm->def->cputune.vcpupin = newVcpuPin;
+            vm->def->cputune.nvcpupin = newVcpuPinNum;
+            newVcpuPin = NULL;
         }
 
+        if (newVcpuPin)
+            virDomainVcpuPinDefFree(newVcpuPin, newVcpuPinNum);
+
         if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
             goto cleanup;
     }
@@ -3846,6 +3880,10 @@ qemudDomainPinVcpuFlags(virDomainPtr dom,
     ret = 0;
 
 cleanup:
+    if (cgroup_vcpu)
+        virCgroupFree(&cgroup_vcpu);
+    if (cgroup_dom)
+        virCgroupFree(&cgroup_dom);
     if (vm)
         virDomainObjUnlock(vm);
     return ret;
diff --git a/src/util/cgroup.c b/src/util/cgroup.c
index 470fc5dd71dc14ccfed6b6ecdf594f122290a1e8..8541c7fdaed49f4061703f47c5860510b7e0f553 100644
--- a/src/util/cgroup.c
+++ b/src/util/cgroup.c
@@ -543,7 +543,8 @@ static int virCgroupMakeGroup(virCgroupPtr parent, virCgroupPtr group,
         /* We need to control cpu bandwidth for each vcpu now */
         if ((flags & VIR_CGROUP_VCPU) &&
             (i != VIR_CGROUP_CONTROLLER_CPU &&
-             i != VIR_CGROUP_CONTROLLER_CPUACCT)) {
+             i != VIR_CGROUP_CONTROLLER_CPUACCT &&
+             i != VIR_CGROUP_CONTROLLER_CPUSET)) {
             /* treat it as unmounted and we can use virCgroupAddTask */
             VIR_FREE(group->controllers[i].mountPoint);
             continue;
@@ -1401,6 +1402,38 @@ int virCgroupGetCpusetMems(virCgroupPtr group, char **mems)
                                 mems);
 }
 
+/**
+ * virCgroupSetCpusetCpus:
+ *
+ * @group: The cgroup to set cpuset.cpus for
+ * @cpus: the cpus to set
+ *
+ * Returns: 0 on success
+ */
+int virCgroupSetCpusetCpus(virCgroupPtr group, const char *cpus)
+{
+    return virCgroupSetValueStr(group,
+                                VIR_CGROUP_CONTROLLER_CPUSET,
+                                "cpuset.cpus",
+                                cpus);
+}
+
+/**
+ * virCgroupGetCpusetCpus:
+ *
+ * @group: The cgroup to get cpuset.cpus for
+ * @cpus: the cpus to get
+ *
+ * Returns: 0 on success
+ */
+int virCgroupGetCpusetCpus(virCgroupPtr group, char **cpus)
+{
+    return virCgroupGetValueStr(group,
+                                VIR_CGROUP_CONTROLLER_CPUSET,
+                                "cpuset.cpus",
+                                cpus);
+}
+
 /**
  * virCgroupDenyAllDevices:
  *
diff --git a/src/util/cgroup.h b/src/util/cgroup.h
index 727e5367d06c5ef5995cb442430d6d5b26dce700..68ac23288e31c7158f19a2d77a7d3eb1ca573924 100644
--- a/src/util/cgroup.h
+++ b/src/util/cgroup.h
@@ -151,6 +151,9 @@ int virCgroupGetFreezerState(virCgroupPtr group, char **state);
 int virCgroupSetCpusetMems(virCgroupPtr group, const char *mems);
 int virCgroupGetCpusetMems(virCgroupPtr group, char **mems);
 
+int virCgroupSetCpusetCpus(virCgroupPtr group, const char *cpus);
+int virCgroupGetCpusetCpus(virCgroupPtr group, char **cpus);
+
 int virCgroupRemove(virCgroupPtr group);
 
 void virCgroupFree(virCgroupPtr *group);
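
Illustrative note (not part of the patch): the sketch below shows roughly what the new virCgroupSetCpusetCpus() amounts to at the kernel interface. virCgroupSetValueStr() resolves the group's directory under the cpuset controller mount point and writes the CPU-list string into its cpuset.cpus file; the kernel then schedules every task in that cgroup only on the listed CPUs. The cgroup path below is hypothetical (mount points and libvirt's group layout differ between hosts), so this is a minimal sketch of the kernel interface, not of libvirt's own path handling.

/*
 * Minimal sketch, assuming a cgroup-v1 cpuset hierarchy mounted at
 * /sys/fs/cgroup/cpuset and a hypothetical per-vcpu group path.
 * virCgroupSetValueStr() performs the equivalent write internally.
 */
#include <stdio.h>

static int set_cpuset_cpus(const char *group_path, const char *cpus)
{
    char path[4096];
    FILE *fp;

    /* e.g. group_path = "/sys/fs/cgroup/cpuset/libvirt/qemu/guest/vcpu1" (assumed) */
    snprintf(path, sizeof(path), "%s/cpuset.cpus", group_path);

    if (!(fp = fopen(path, "w")))
        return -1;

    /* The kernel accepts a CPU list such as "0-1,3"; tasks in the group
     * are then only scheduled on those CPUs. */
    if (fprintf(fp, "%s", cpus) < 0) {
        fclose(fp);
        return -1;
    }
    return fclose(fp) == 0 ? 0 : -1;
}

int main(void)
{
    return set_cpuset_cpus("/sys/fs/cgroup/cpuset/libvirt/qemu/guest/vcpu1", "0-1,3");
}

In the patch itself, the string written comes from virDomainCpuSetFormat(), which turns a vcpupin cpumask into exactly this kind of CPU list before qemuSetupCgroupVcpuPin() hands it to virCgroupSetCpusetCpus().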