/*
 * qemu_cgroup.c: QEMU cgroup management
 *
 * Copyright (C) 2006-2013 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */

#include <config.h>

#include "qemu_cgroup.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "vircgroup.h"
#include "virlog.h"
#include "viralloc.h"
#include "virerror.h"
#include "virutil.h"
#include "domain_audit.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

static const char *const defaultDeviceACL[] = {
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet",
    NULL,
};
#define DEVICE_PTY_MAJOR 136
#define DEVICE_SND_MAJOR 116

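/* Check whether a cgroup controller can be used: it must be mounted on
 * the host and enabled in the driver configuration. */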
bool qemuCgroupControllerActive(virQEMUDriverPtr driver,
                                int controller)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool ret = false;

    if (driver->cgroup == NULL)
        goto cleanup;
    if (controller < 0 || controller >= VIR_CGROUP_CONTROLLER_LAST)
        goto cleanup;
    if (!virCgroupMounted(driver->cgroup, controller))
        goto cleanup;
    if (cfg->cgroupControllers & (1 << controller))
        ret = true;

cleanup:
    virObjectUnref(cfg);
    return ret;
}

static int
qemuSetupDiskPathAllow(virDomainDiskDefPtr disk,
                       const char *path,
                       size_t depth ATTRIBUTE_UNUSED,
                       void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    VIR_DEBUG("Process path %s for disk", path);
    rc = virCgroupAllowDevicePath(data->cgroup, path,
                                  (disk->readonly ? VIR_CGROUP_DEVICE_READ
                                   : VIR_CGROUP_DEVICE_RW));
    virDomainAuditCgroupPath(data->vm, data->cgroup, "allow", path,
                             disk->readonly ? "r" : "rw", rc);
    if (rc < 0) {
        if (rc == -EACCES) { /* Get this for root squash NFS */
            VIR_DEBUG("Ignoring EACCES for %s", path);
        } else {
            virReportSystemError(-rc,
                                 _("Unable to allow access for disk path %s"),
                                 path);
            return -1;
        }
    }
    return 0;
}


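/* Grant the domain access to a disk source and every image in its backing
 * chain in the devices cgroup controller; read-only disks get read access
 * only. */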
int qemuSetupDiskCgroup(virDomainObjPtr vm,
                        virCgroupPtr cgroup,
                        virDomainDiskDefPtr disk)
{
    qemuCgroupData data = { vm, cgroup };
    return virDomainDiskDefForeachPath(disk,
                                       true,
                                       qemuSetupDiskPathAllow,
                                       &data);
}


static int
qemuTeardownDiskPathDeny(virDomainDiskDefPtr disk ATTRIBUTE_UNUSED,
                         const char *path,
                         size_t depth ATTRIBUTE_UNUSED,
                         void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    VIR_DEBUG("Process path %s for disk", path);
    rc = virCgroupDenyDevicePath(data->cgroup, path,
                                 VIR_CGROUP_DEVICE_RWM);
    virDomainAuditCgroupPath(data->vm, data->cgroup, "deny", path, "rwm", rc);
    if (rc < 0) {
        if (rc == -EACCES) { /* Get this for root squash NFS */
            VIR_DEBUG("Ignoring EACCES for %s", path);
        } else {
            virReportSystemError(-rc,
                                 _("Unable to deny access for disk path %s"),
                                 path);
            return -1;
        }
    }
    return 0;
}


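/* Revoke the domain's access to a disk source and its backing chain in
 * the devices cgroup controller. */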
int qemuTeardownDiskCgroup(virDomainObjPtr vm,
                           virCgroupPtr cgroup,
                           virDomainDiskDefPtr disk)
{
    qemuCgroupData data = { vm, cgroup };
    return virDomainDiskDefForeachPath(disk,
                                       true,
                                       qemuTeardownDiskPathDeny,
                                       &data);
}


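/* virDomainChrDefForeach callback: allow access to the host device node
 * backing a character device; chardevs of other types are skipped. */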
static int
qemuSetupChardevCgroup(virDomainDefPtr def,
                       virDomainChrDefPtr dev,
                       void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    if (dev->source.type != VIR_DOMAIN_CHR_TYPE_DEV)
        return 0;


    VIR_DEBUG("Process path '%s' for disk", dev->source.data.file.path);
    rc = virCgroupAllowDevicePath(data->cgroup, dev->source.data.file.path,
                                  VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupPath(data->vm, data->cgroup, "allow",
                             dev->source.data.file.path, "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s for %s"),
                             dev->source.data.file.path, def->name);
        return -1;
    }

    return 0;
}


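/* virUSBDeviceFileIterate callback: allow read/write access to one USB
 * device node in the devices cgroup controller. */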
int qemuSetupHostUsbDeviceCgroup(virUSBDevicePtr dev ATTRIBUTE_UNUSED,
                                 const char *path,
                                 void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    VIR_DEBUG("Process path '%s' for USB device", path);
    rc = virCgroupAllowDevicePath(data->cgroup, path,
                                  VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupPath(data->vm, data->cgroup, "allow", path, "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s"),
                             path);
        return -1;
    }

    return 0;
}

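/* Create the per-domain cgroup and apply the configured controls:
 * device ACL whitelist, blkio weights, memory limits, CPU shares and
 * cpuset.mems for strict NUMA placement. */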
int qemuSetupCgroup(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    virBitmapPtr nodemask)
{
    virCgroupPtr cgroup = NULL;
    int rc;
    unsigned int i;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    const char *const *deviceACL =
        cfg->cgroupDeviceACL ?
        (const char *const *)cfg->cgroupDeviceACL :
        defaultDeviceACL;

    if (driver->cgroup == NULL)
        goto done; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 1);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to create cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_DEVICES)) {
        qemuCgroupData data = { vm, cgroup };
        rc = virCgroupDenyAllDevices(cgroup);
        virDomainAuditCgroup(vm, cgroup, "deny", "all", rc == 0);
        if (rc != 0) {
            if (rc == -EPERM) {
                VIR_WARN("Group devices ACL is not accessible, disabling whitelisting");
                goto done;
            }

            virReportSystemError(-rc,
                                 _("Unable to deny all devices for %s"), vm->def->name);
            goto cleanup;
        }

        for (i = 0; i < vm->def->ndisks ; i++) {
            if (qemuDomainDetermineDiskChain(driver, vm->def->disks[i],
                                             false) < 0 ||
                qemuSetupDiskCgroup(vm, cgroup, vm->def->disks[i]) < 0)
                goto cleanup;
        }

        rc = virCgroupAllowDeviceMajor(cgroup, 'c', DEVICE_PTY_MAJOR,
                                       VIR_CGROUP_DEVICE_RW);
        virDomainAuditCgroupMajor(vm, cgroup, "allow", DEVICE_PTY_MAJOR,
                                  "pty", "rw", rc == 0);
        if (rc != 0) {
            virReportSystemError(-rc, "%s",
                                 _("unable to allow /dev/pts/ devices"));
            goto cleanup;
        }

        if (vm->def->nsounds &&
            (!vm->def->ngraphics ||
             ((vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
               cfg->vncAllowHostAudio) ||
              (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SDL)))) {
            rc = virCgroupAllowDeviceMajor(cgroup, 'c', DEVICE_SND_MAJOR,
                                           VIR_CGROUP_DEVICE_RW);
            virDomainAuditCgroupMajor(vm, cgroup, "allow", DEVICE_SND_MAJOR,
                                      "sound", "rw", rc == 0);
            if (rc != 0) {
                virReportSystemError(-rc, "%s",
                                     _("unable to allow /dev/snd/ devices"));
                goto cleanup;
            }
        }

        for (i = 0; deviceACL[i] != NULL ; i++) {
            rc = virCgroupAllowDevicePath(cgroup, deviceACL[i],
                                          VIR_CGROUP_DEVICE_RW);
            virDomainAuditCgroupPath(vm, cgroup, "allow", deviceACL[i], "rw", rc);
            if (rc < 0 &&
                rc != -ENOENT) {
                virReportSystemError(-rc,
                                     _("unable to allow device %s"),
                                     deviceACL[i]);
                goto cleanup;
            }
        }

        if (virDomainChrDefForeach(vm->def,
                                   true,
                                   qemuSetupChardevCgroup,
                                   &data) < 0)
            goto cleanup;

        for (i = 0; i < vm->def->nhostdevs; i++) {
            virDomainHostdevDefPtr hostdev = vm->def->hostdevs[i];
            virUSBDevicePtr usb;

            if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS)
                continue;
            if (hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB)
                continue;
            if (hostdev->missing)
                continue;

            if ((usb = virUSBDeviceNew(hostdev->source.subsys.u.usb.bus,
                                       hostdev->source.subsys.u.usb.device,
                                       NULL)) == NULL)
                goto cleanup;

            if (virUSBDeviceFileIterate(usb, qemuSetupHostUsbDeviceCgroup,
                                        &data) < 0) {
                virUSBDeviceFree(usb);
                goto cleanup;
            }
            virUSBDeviceFree(usb);
        }
    }

    if (vm->def->blkio.weight != 0) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_BLKIO)) {
            rc = virCgroupSetBlkioWeight(cgroup, vm->def->blkio.weight);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set io weight for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("Block I/O tuning is not available on this host"));
            goto cleanup;
        }
    }

    if (vm->def->blkio.ndevices) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_BLKIO)) {
            for (i = 0; i < vm->def->blkio.ndevices; i++) {
                virBlkioDeviceWeightPtr dw = &vm->def->blkio.devices[i];
                if (!dw->weight)
                    continue;
                rc = virCgroupSetBlkioDeviceWeight(cgroup, dw->path,
                                                   dw->weight);
                if (rc != 0) {
                    virReportSystemError(-rc,
                                         _("Unable to set io device weight "
                                           "for domain %s"),
                                         vm->def->name);
                    goto cleanup;
                }
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("Block I/O tuning is not available on this host"));
            goto cleanup;
        }
    }

    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_MEMORY)) {
        unsigned long long hard_limit = vm->def->mem.hard_limit;

        if (!hard_limit) {
            /* If there is no hard_limit set, set a reasonable one to avoid
             * system thrashing caused by exploited qemu.  A 'reasonable
             * limit' has been chosen:
             *     (1 + k) * (domain memory + total video memory) + (32MB of
             *     cache per disk) + F
             * where k = 0.5 and F = 200MB.  The cache for disks is important as
             * kernel cache on the host side counts into the RSS limit. */
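            /* Worked example (illustrative only; values in KiB as used below):
             * a 4 GiB guest (4194304) with 16 MiB of video RAM (16384) and
             * two disks gets 1.5 * (4194304 + 16384) + 204800 + 2 * 32768
             * = 6586368 KiB, i.e. roughly 6.3 GiB. */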
            hard_limit = vm->def->mem.max_balloon;
            for (i = 0; i < vm->def->nvideos; i++)
                hard_limit += vm->def->videos[i]->vram;
            hard_limit = hard_limit * 1.5 + 204800;
            hard_limit += vm->def->ndisks * 32768;
        }

        rc = virCgroupSetMemoryHardLimit(cgroup, hard_limit);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set memory hard limit for domain %s"),
                                 vm->def->name);
            goto cleanup;
        }
        if (vm->def->mem.soft_limit != 0) {
            rc = virCgroupSetMemorySoftLimit(cgroup, vm->def->mem.soft_limit);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set memory soft limit for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        }

        if (vm->def->mem.swap_hard_limit != 0) {
            rc = virCgroupSetMemSwapHardLimit(cgroup, vm->def->mem.swap_hard_limit);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set swap hard limit for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        }
    } else if (vm->def->mem.hard_limit != 0 ||
               vm->def->mem.soft_limit != 0 ||
               vm->def->mem.swap_hard_limit != 0) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("Memory cgroup is not available on this host"));
    } else {
        VIR_WARN("Could not autoset a RSS limit for domain %s", vm->def->name);
    }

    if (vm->def->cputune.shares != 0) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
            rc = virCgroupSetCpuShares(cgroup, vm->def->cputune.shares);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set io cpu shares for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("CPU tuning is not available on this host"));
        }
    }

    if ((vm->def->numatune.memory.nodemask ||
         (vm->def->numatune.memory.placement_mode ==
          VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_AUTO)) &&
        vm->def->numatune.memory.mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
        qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
        char *mask = NULL;
        if (vm->def->numatune.memory.placement_mode ==
            VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_AUTO)
            mask = virBitmapFormat(nodemask);
        else
            mask = virBitmapFormat(vm->def->numatune.memory.nodemask);
        if (!mask) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("failed to convert memory nodemask"));
            goto cleanup;
        }

        rc = virCgroupSetCpusetMems(cgroup, mask);
        VIR_FREE(mask);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set cpuset.mems for domain %s"),
                                 vm->def->name);
            goto cleanup;
        }
    }
done:
    virObjectUnref(cfg);
    virCgroupFree(&cgroup);
    return 0;

cleanup:
    virObjectUnref(cfg);
    if (cgroup) {
        virCgroupRemove(cgroup);
        virCgroupFree(&cgroup);
    }
    return -1;
}

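/* Apply CFS bandwidth settings (period and quota) to a cgroup.  If setting
 * the quota fails after the period was changed, the previous period is
 * restored. */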
int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup, unsigned long long period,
                          long long quota)
{
    int rc;
    unsigned long long old_period;

    if (period == 0 && quota == 0)
        return 0;

    if (period) {
        /* get old period, and we can rollback if set quota failed */
        rc = virCgroupGetCpuCfsPeriod(cgroup, &old_period);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to get cpu bandwidth period"));
            return -1;
        }

        rc = virCgroupSetCpuCfsPeriod(cgroup, period);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to set cpu bandwidth period"));
            return -1;
        }
    }

    if (quota) {
        rc = virCgroupSetCpuCfsQuota(cgroup, quota);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to set cpu bandwidth quota"));
            goto cleanup;
        }
    }

    return 0;

cleanup:
    if (period) {
        rc = virCgroupSetCpuCfsPeriod(cgroup, old_period);
        if (rc < 0)
            virReportSystemError(-rc, "%s",
                                 _("Unable to rollback cpu bandwidth period"));
    }

    return -1;
}

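/* Apply the <vcpupin> mask matching @vcpuid to the given vcpu cgroup;
 * returns -1 if no matching entry exists. */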
int qemuSetupCgroupVcpuPin(virCgroupPtr cgroup,
                           virDomainVcpuPinDefPtr *vcpupin,
                           int nvcpupin,
                           int vcpuid)
{
    int i;

    for (i = 0; i < nvcpupin; i++) {
        if (vcpuid == vcpupin[i]->vcpuid) {
            return qemuSetupCgroupEmulatorPin(cgroup, vcpupin[i]->cpumask);
        }
    }

    return -1;
}

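/* Write a CPU mask into the cgroup's cpuset.cpus. */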
int qemuSetupCgroupEmulatorPin(virCgroupPtr cgroup,
                               virBitmapPtr cpumask)
{
    int rc = 0;
    char *new_cpus = NULL;

    new_cpus = virBitmapFormat(cpumask);
    if (!new_cpus) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("failed to convert cpu mask"));
        rc = -1;
        goto cleanup;
    }

    rc = virCgroupSetCpusetCpus(cgroup, new_cpus);
    if (rc < 0) {
        virReportSystemError(-rc,
                             "%s",
                             _("Unable to set cpuset.cpus"));
        goto cleanup;
    }

cleanup:
    VIR_FREE(new_cpus);
    return rc;
}

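/* Create a sub-cgroup for each vcpu thread, move the thread into it and
 * apply per-vcpu bandwidth and pinning settings.  This is skipped when the
 * vcpu threads cannot be told apart from the main qemu process. */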
int qemuSetupCgroupForVcpu(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    virCgroupPtr cgroup = NULL;
    virCgroupPtr cgroup_vcpu = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    int rc;
    unsigned int i, j;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;

    if ((period || quota) &&
        (!driver->cgroup ||
         !qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU))) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /* We are trying to set up cgroups for CPU pinning, which can also be done
     * with virProcessInfoSetAffinity, so the lack of cgroups is not fatal
     * here.
     */
    if (driver->cgroup == NULL)
        return 0;

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to find cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
        /* If we don't know the VCPU<->PID mapping or all vcpus run in the
         * same thread, we cannot control each vcpu.
         */
        VIR_WARN("Unable to get vcpus' pids.");
        virCgroupFree(&cgroup);
        return 0;
    }

    for (i = 0; i < priv->nvcpupids; i++) {
        rc = virCgroupForVcpu(cgroup, i, &cgroup_vcpu, 1);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("Unable to create vcpu cgroup for %s(vcpu:"
                                   " %d)"),
                                 vm->def->name, i);
            goto cleanup;
        }

        /* move the thread for vcpu to sub dir */
        rc = virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("unable to add vcpu %d task %d to cgroup"),
                                 i, priv->vcpupids[i]);
            goto cleanup;
        }

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }

        /* Set vcpupin in cgroup if vcpupin xml is provided */
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
            /* find the right CPU to pin, otherwise
             * qemuSetupCgroupVcpuPin will fail. */
            for (j = 0; j < def->cputune.nvcpupin; j++) {
                if (def->cputune.vcpupin[j]->vcpuid != i)
                    continue;

                if (qemuSetupCgroupVcpuPin(cgroup_vcpu,
                                           def->cputune.vcpupin,
                                           def->cputune.nvcpupin,
                                           i) < 0)
                    goto cleanup;

                break;
            }
        }

        virCgroupFree(&cgroup_vcpu);
    }

    virCgroupFree(&cgroup);
    return 0;

cleanup:
    if (cgroup_vcpu) {
        virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }

    if (cgroup) {
        virCgroupRemove(cgroup);
        virCgroupFree(&cgroup);
    }

    return -1;
}

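/* Create an emulator sub-cgroup, move the existing qemu tasks into it and
 * apply emulator CPU pinning and bandwidth settings. */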
int qemuSetupCgroupForEmulator(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               virBitmapPtr nodemask)
{
    virBitmapPtr cpumask = NULL;
    virBitmapPtr cpumap = NULL;
    virCgroupPtr cgroup = NULL;
    virCgroupPtr cgroup_emulator = NULL;
    virDomainDefPtr def = vm->def;
    unsigned long long period = vm->def->cputune.emulator_period;
    long long quota = vm->def->cputune.emulator_quota;
    int rc, i;

    if ((period || quota) &&
        (!driver->cgroup ||
         !qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU))) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    if (driver->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to find cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    rc = virCgroupForEmulator(cgroup, &cgroup_emulator, 1);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to create emulator cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    for (i = 0; i < VIR_CGROUP_CONTROLLER_LAST; i++) {
        if (!qemuCgroupControllerActive(driver, i))
            continue;
        rc = virCgroupMoveTask(cgroup, cgroup_emulator, i);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("Unable to move tasks from domain cgroup to "
                                   "emulator cgroup in controller %d for %s"),
                                 i, vm->def->name);
            goto cleanup;
        }
    }

    if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
        if (!(cpumap = qemuPrepareCpumap(driver, nodemask)))
            goto cleanup;
        cpumask = cpumap;
    } else if (def->cputune.emulatorpin) {
        cpumask = def->cputune.emulatorpin->cpumask;
    } else if (def->cpumask) {
        cpumask = def->cpumask;
    }

    if (cpumask) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
            rc = qemuSetupCgroupEmulatorPin(cgroup_emulator, cpumask);
            if (rc < 0)
                goto cleanup;
        }
        cpumask = NULL; /* sanity */
    }

    if (period || quota) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
            if ((rc = qemuSetupCgroupVcpuBW(cgroup_emulator, period,
                                            quota)) < 0)
                goto cleanup;
        }
    }

    virCgroupFree(&cgroup_emulator);
    virCgroupFree(&cgroup);
    virBitmapFree(cpumap);
    return 0;

cleanup:
    virBitmapFree(cpumap);

    if (cgroup_emulator) {
        virCgroupRemove(cgroup_emulator);
        virCgroupFree(&cgroup_emulator);
    }

    if (cgroup) {
        virCgroupRemove(cgroup);
        virCgroupFree(&cgroup);
    }

    return -1;
}

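/* Remove the per-domain cgroup; with @quiet set, failure to find the
 * cgroup is not reported as an error. */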
int qemuRemoveCgroup(virQEMUDriverPtr driver,
                     virDomainObjPtr vm,
                     int quiet)
{
    virCgroupPtr cgroup;
    int rc;

    if (driver->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
    if (rc != 0) {
        if (!quiet)
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Unable to find cgroup for %s"),
                           vm->def->name);
        return rc;
    }

    rc = virCgroupRemove(cgroup);
    virCgroupFree(&cgroup);
    return rc;
}

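/* Place the calling process into the domain's cgroup. */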
int qemuAddToCgroup(virQEMUDriverPtr driver,
                    virDomainDefPtr def)
{
    virCgroupPtr cgroup = NULL;
    int ret = -1;
    int rc;

    if (driver->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, def->name, &cgroup, 0);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("unable to find cgroup for domain %s"),
                             def->name);
        goto cleanup;
    }

    rc = virCgroupAddTask(cgroup, getpid());
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("unable to add domain %s task %d to cgroup"),
                             def->name, getpid());
        goto cleanup;
    }

    ret = 0;

cleanup:
    virCgroupFree(&cgroup);
    return ret;
}