/*
 * qemu_cgroup.c: QEMU cgroup management
 *
 * Copyright (C) 2006-2013 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */

#include <config.h>

#include "qemu_cgroup.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "vircgroup.h"
#include "virlog.h"
#include "viralloc.h"
#include "virerror.h"
#include "virutil.h"
#include "domain_audit.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

static const char *const defaultDeviceACL[] = {
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet",
    NULL,
};
#define DEVICE_PTY_MAJOR 136
#define DEVICE_SND_MAJOR 116

bool qemuCgroupControllerActive(virQEMUDriverPtr driver,
                                int controller)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool ret = false;

    if (driver->cgroup == NULL)
        goto cleanup;
    if (controller < 0 || controller >= VIR_CGROUP_CONTROLLER_LAST)
        goto cleanup;
    if (!virCgroupMounted(driver->cgroup, controller))
        goto cleanup;
    if (cfg->cgroupControllers & (1 << controller))
        ret = true;

cleanup:
    virObjectUnref(cfg);
    return ret;
}

static int
qemuSetupDiskPathAllow(virDomainDiskDefPtr disk,
                       const char *path,
                       size_t depth ATTRIBUTE_UNUSED,
                       void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    VIR_DEBUG("Process path %s for disk", path);
    rc = virCgroupAllowDevicePath(data->cgroup, path,
                                  (disk->readonly ? VIR_CGROUP_DEVICE_READ
                                   : VIR_CGROUP_DEVICE_RW));
    virDomainAuditCgroupPath(data->vm, data->cgroup, "allow", path,
                             disk->readonly ? "r" : "rw", rc);
    if (rc < 0) {
        if (rc == -EACCES) { /* Get this for root squash NFS */
            VIR_DEBUG("Ignoring EACCES for %s", path);
        } else {
            virReportSystemError(-rc,
                                 _("Unable to allow access for disk path %s"),
                                 path);
            return -1;
        }
    }
    return 0;
}


int qemuSetupDiskCgroup(virDomainObjPtr vm,
                        virCgroupPtr cgroup,
                        virDomainDiskDefPtr disk)
{
    qemuCgroupData data = { vm, cgroup };
    return virDomainDiskDefForeachPath(disk,
                                       true,
                                       qemuSetupDiskPathAllow,
                                       &data);
}


static int
qemuTeardownDiskPathDeny(virDomainDiskDefPtr disk ATTRIBUTE_UNUSED,
                         const char *path,
                         size_t depth ATTRIBUTE_UNUSED,
                         void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    VIR_DEBUG("Process path %s for disk", path);
    rc = virCgroupDenyDevicePath(data->cgroup, path,
                                 VIR_CGROUP_DEVICE_RWM);
    virDomainAuditCgroupPath(data->vm, data->cgroup, "deny", path, "rwm", rc);
    if (rc < 0) {
        if (rc == -EACCES) { /* Get this for root squash NFS */
            VIR_DEBUG("Ignoring EACCES for %s", path);
        } else {
            virReportSystemError(-rc,
                                 _("Unable to deny access for disk path %s"),
                                 path);
            return -1;
        }
    }
    return 0;
}


int qemuTeardownDiskCgroup(virDomainObjPtr vm,
                           virCgroupPtr cgroup,
                           virDomainDiskDefPtr disk)
{
    qemuCgroupData data = { vm, cgroup };
    return virDomainDiskDefForeachPath(disk,
                                       true,
                                       qemuTeardownDiskPathDeny,
                                       &data);
}


static int
qemuSetupChardevCgroup(virDomainDefPtr def,
                       virDomainChrDefPtr dev,
                       void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    if (dev->source.type != VIR_DOMAIN_CHR_TYPE_DEV)
        return 0;


    VIR_DEBUG("Process path '%s' for chardev", dev->source.data.file.path);
    rc = virCgroupAllowDevicePath(data->cgroup, dev->source.data.file.path,
                                  VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupPath(data->vm, data->cgroup, "allow",
                             dev->source.data.file.path, "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s for %s"),
                             dev->source.data.file.path, def->name);
        return -1;
    }

    return 0;
}


int qemuSetupHostUsbDeviceCgroup(virUSBDevicePtr dev ATTRIBUTE_UNUSED,
                                 const char *path,
                                 void *opaque)
{
    qemuCgroupData *data = opaque;
    int rc;

    VIR_DEBUG("Process path '%s' for USB device", path);
    rc = virCgroupAllowDevicePath(data->cgroup, path,
                                  VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupPath(data->vm, data->cgroup, "allow", path, "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s"),
                             path);
        return -1;
    }

    return 0;
}

int qemuSetupCgroup(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    virBitmapPtr nodemask)
{
    virCgroupPtr cgroup = NULL;
    int rc;
    unsigned int i;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    const char *const *deviceACL =
        cfg->cgroupDeviceACL ?
        (const char *const *)cfg->cgroupDeviceACL :
        defaultDeviceACL;

    if (driver->cgroup == NULL)
        goto done; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 1);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to create cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_DEVICES)) {
        qemuCgroupData data = { vm, cgroup };
        rc = virCgroupDenyAllDevices(cgroup);
        virDomainAuditCgroup(vm, cgroup, "deny", "all", rc == 0);
        if (rc != 0) {
            if (rc == -EPERM) {
                VIR_WARN("Group devices ACL is not accessible, disabling whitelisting");
                goto done;
            }

            virReportSystemError(-rc,
                                 _("Unable to deny all devices for %s"), vm->def->name);
            goto cleanup;
        }

        for (i = 0; i < vm->def->ndisks ; i++) {
            if (qemuSetupDiskCgroup(vm, cgroup, vm->def->disks[i]) < 0)
                goto cleanup;
        }

        rc = virCgroupAllowDeviceMajor(cgroup, 'c', DEVICE_PTY_MAJOR,
                                       VIR_CGROUP_DEVICE_RW);
        virDomainAuditCgroupMajor(vm, cgroup, "allow", DEVICE_PTY_MAJOR,
                                  "pty", "rw", rc == 0);
        if (rc != 0) {
            virReportSystemError(-rc, "%s",
                                 _("unable to allow /dev/pts/ devices"));
            goto cleanup;
        }

        if (vm->def->nsounds &&
            (!vm->def->ngraphics ||
             ((vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
               cfg->vncAllowHostAudio) ||
              (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SDL)))) {
            rc = virCgroupAllowDeviceMajor(cgroup, 'c', DEVICE_SND_MAJOR,
                                           VIR_CGROUP_DEVICE_RW);
            virDomainAuditCgroupMajor(vm, cgroup, "allow", DEVICE_SND_MAJOR,
                                      "sound", "rw", rc == 0);
            if (rc != 0) {
                virReportSystemError(-rc, "%s",
                                     _("unable to allow /dev/snd/ devices"));
                goto cleanup;
            }
        }

        for (i = 0; deviceACL[i] != NULL ; i++) {
            rc = virCgroupAllowDevicePath(cgroup, deviceACL[i],
                                          VIR_CGROUP_DEVICE_RW);
            virDomainAuditCgroupPath(vm, cgroup, "allow", deviceACL[i], "rw", rc);
            if (rc < 0 &&
                rc != -ENOENT) {
                virReportSystemError(-rc,
                                     _("unable to allow device %s"),
                                     deviceACL[i]);
                goto cleanup;
            }
        }

        if (virDomainChrDefForeach(vm->def,
                                   true,
                                   qemuSetupChardevCgroup,
                                   &data) < 0)
            goto cleanup;

        for (i = 0; i < vm->def->nhostdevs; i++) {
            virDomainHostdevDefPtr hostdev = vm->def->hostdevs[i];
            virUSBDevicePtr usb;

            if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS)
                continue;
            if (hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB)
                continue;
            if (hostdev->missing)
                continue;

            if ((usb = virUSBDeviceNew(hostdev->source.subsys.u.usb.bus,
                                       hostdev->source.subsys.u.usb.device,
                                       NULL)) == NULL)
                goto cleanup;

            if (virUSBDeviceFileIterate(usb, qemuSetupHostUsbDeviceCgroup,
                                        &data) < 0) {
                virUSBDeviceFree(usb);
                goto cleanup;
            }
            virUSBDeviceFree(usb);
        }
    }

    if (vm->def->blkio.weight != 0) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_BLKIO)) {
            rc = virCgroupSetBlkioWeight(cgroup, vm->def->blkio.weight);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set io weight for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("Block I/O tuning is not available on this host"));
            goto cleanup;
        }
    }

    if (vm->def->blkio.ndevices) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_BLKIO)) {
            for (i = 0; i < vm->def->blkio.ndevices; i++) {
                virBlkioDeviceWeightPtr dw = &vm->def->blkio.devices[i];
                if (!dw->weight)
                    continue;
                rc = virCgroupSetBlkioDeviceWeight(cgroup, dw->path,
                                                   dw->weight);
                if (rc != 0) {
                    virReportSystemError(-rc,
                                         _("Unable to set io device weight "
                                           "for domain %s"),
                                         vm->def->name);
                    goto cleanup;
                }
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("Block I/O tuning is not available on this host"));
            goto cleanup;
        }
    }

    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_MEMORY)) {
        unsigned long long hard_limit = vm->def->mem.hard_limit;

        if (!hard_limit) {
            /* If there is no hard_limit set, set a reasonable one to avoid
             * system thrashing caused by exploited qemu.  A 'reasonable
             * limit' has been chosen:
             *     (1 + k) * (domain memory + total video memory) + (32MB of
             *     cache per disk) + F
             * where k = 0.5 and F = 200MB.  The cache for disks is important as
             * kernel cache on the host side counts into the RSS limit. */
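            /* Illustrative worked example of the formula above (the numbers
             * are assumed, not taken from any real guest; units are KiB,
             * matching max_balloon and vram as used below): a 4 GiB guest
             * (4194304) with one 16 MiB video device (16384) and two disks
             * gets
             *     1.5 * (4194304 + 16384) + 204800 + 2 * 32768 = 6586368 KiB,
             * i.e. roughly 6.3 GiB of RSS allowed for the qemu process. */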
            hard_limit = vm->def->mem.max_balloon;
            for (i = 0; i < vm->def->nvideos; i++)
                hard_limit += vm->def->videos[i]->vram;
            hard_limit = hard_limit * 1.5 + 204800;
            hard_limit += vm->def->ndisks * 32768;
        }

        rc = virCgroupSetMemoryHardLimit(cgroup, hard_limit);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set memory hard limit for domain %s"),
                                 vm->def->name);
            goto cleanup;
        }
        if (vm->def->mem.soft_limit != 0) {
            rc = virCgroupSetMemorySoftLimit(cgroup, vm->def->mem.soft_limit);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set memory soft limit for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        }

        if (vm->def->mem.swap_hard_limit != 0) {
            rc = virCgroupSetMemSwapHardLimit(cgroup, vm->def->mem.swap_hard_limit);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set swap hard limit for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        }
    } else if (vm->def->mem.hard_limit != 0 ||
               vm->def->mem.soft_limit != 0 ||
               vm->def->mem.swap_hard_limit != 0) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("Memory cgroup is not available on this host"));
    } else {
        VIR_WARN("Could not autoset a RSS limit for domain %s", vm->def->name);
    }

    if (vm->def->cputune.shares != 0) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
            rc = virCgroupSetCpuShares(cgroup, vm->def->cputune.shares);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set io cpu shares for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("CPU tuning is not available on this host"));
        }
    }

    if ((vm->def->numatune.memory.nodemask ||
         (vm->def->numatune.memory.placement_mode ==
          VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_AUTO)) &&
        vm->def->numatune.memory.mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
        qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
        char *mask = NULL;
        if (vm->def->numatune.memory.placement_mode ==
            VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_AUTO)
            mask = virBitmapFormat(nodemask);
        else
            mask = virBitmapFormat(vm->def->numatune.memory.nodemask);
        if (!mask) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("failed to convert memory nodemask"));
            goto cleanup;
        }

        rc = virCgroupSetCpusetMems(cgroup, mask);
        VIR_FREE(mask);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set cpuset.mems for domain %s"),
                                 vm->def->name);
            goto cleanup;
        }
    }
done:
    virObjectUnref(cfg);
    virCgroupFree(&cgroup);
    return 0;

cleanup:
    virObjectUnref(cfg);
    if (cgroup) {
        virCgroupRemove(cgroup);
        virCgroupFree(&cgroup);
    }
    return -1;
}

int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup, unsigned long long period,
                          long long quota)
{
    int rc;
    unsigned long long old_period;

    if (period == 0 && quota == 0)
        return 0;

    if (period) {
        /* get the old period so we can roll back if setting the quota fails */
        rc = virCgroupGetCpuCfsPeriod(cgroup, &old_period);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to get cpu bandwidth period"));
            return -1;
        }

        rc = virCgroupSetCpuCfsPeriod(cgroup, period);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to set cpu bandwidth period"));
            return -1;
        }
    }

    if (quota) {
        rc = virCgroupSetCpuCfsQuota(cgroup, quota);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to set cpu bandwidth quota"));
            goto cleanup;
        }
    }

    return 0;

cleanup:
    if (period) {
        rc = virCgroupSetCpuCfsPeriod(cgroup, old_period);
        if (rc < 0)
            virReportSystemError(-rc, "%s",
                                 _("Unable to rollback cpu bandwidth period"));
    }

    return -1;
}

int qemuSetupCgroupVcpuPin(virCgroupPtr cgroup,
                           virDomainVcpuPinDefPtr *vcpupin,
                           int nvcpupin,
                           int vcpuid)
{
    int i;

    for (i = 0; i < nvcpupin; i++) {
        if (vcpuid == vcpupin[i]->vcpuid) {
            return qemuSetupCgroupEmulatorPin(cgroup, vcpupin[i]->cpumask);
        }
    }

    return -1;
}

int qemuSetupCgroupEmulatorPin(virCgroupPtr cgroup,
                               virBitmapPtr cpumask)
{
    int rc = 0;
    char *new_cpus = NULL;

    new_cpus = virBitmapFormat(cpumask);
    if (!new_cpus) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("failed to convert cpu mask"));
        rc = -1;
        goto cleanup;
    }

    rc = virCgroupSetCpusetCpus(cgroup, new_cpus);
    if (rc < 0) {
        virReportSystemError(-rc,
                             "%s",
                             _("Unable to set cpuset.cpus"));
        goto cleanup;
    }

cleanup:
    VIR_FREE(new_cpus);
    return rc;
}

int qemuSetupCgroupForVcpu(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    virCgroupPtr cgroup = NULL;
    virCgroupPtr cgroup_vcpu = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    int rc;
    unsigned int i, j;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;

    if ((period || quota) &&
        (!driver->cgroup ||
         !qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU))) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /* We are trying to set up cgroups for CPU pinning, which can also be done
     * with virProcessInfoSetAffinity, thus the lack of cgroups is not fatal
     * here.
     */
    if (driver->cgroup == NULL)
        return 0;

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to find cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
        /* If we don't know the VCPU<->PID mapping or all vcpus run in the
         * same thread, we cannot control each vcpu.
         */
        VIR_WARN("Unable to get vcpus' pids.");
        virCgroupFree(&cgroup);
        return 0;
    }

    for (i = 0; i < priv->nvcpupids; i++) {
        rc = virCgroupForVcpu(cgroup, i, &cgroup_vcpu, 1);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("Unable to create vcpu cgroup for %s(vcpu:"
                                   " %d)"),
                                 vm->def->name, i);
            goto cleanup;
        }

        /* move the thread for vcpu to sub dir */
        rc = virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("unable to add vcpu %d task %d to cgroup"),
                                 i, priv->vcpupids[i]);
            goto cleanup;
        }

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }

        /* Set vcpupin in cgroup if vcpupin xml is provided */
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
            /* find the right CPU to pin, otherwise
             * qemuSetupCgroupVcpuPin will fail. */
            for (j = 0; j < def->cputune.nvcpupin; j++) {
                if (def->cputune.vcpupin[j]->vcpuid != i)
                    continue;

                if (qemuSetupCgroupVcpuPin(cgroup_vcpu,
                                           def->cputune.vcpupin,
                                           def->cputune.nvcpupin,
                                           i) < 0)
                    goto cleanup;

                break;
            }
        }

        virCgroupFree(&cgroup_vcpu);
    }

    virCgroupFree(&cgroup);
    return 0;

cleanup:
    if (cgroup_vcpu) {
        virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }

    if (cgroup) {
        virCgroupRemove(cgroup);
        virCgroupFree(&cgroup);
    }

    return -1;
}

int qemuSetupCgroupForEmulator(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               virBitmapPtr nodemask)
{
    virBitmapPtr cpumask = NULL;
    virBitmapPtr cpumap = NULL;
    virCgroupPtr cgroup = NULL;
    virCgroupPtr cgroup_emulator = NULL;
    virDomainDefPtr def = vm->def;
    unsigned long long period = vm->def->cputune.emulator_period;
    long long quota = vm->def->cputune.emulator_quota;
    int rc, i;

    if ((period || quota) &&
        (!driver->cgroup ||
         !qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU))) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    if (driver->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to find cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    rc = virCgroupForEmulator(cgroup, &cgroup_emulator, 1);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to create emulator cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    for (i = 0; i < VIR_CGROUP_CONTROLLER_LAST; i++) {
        if (!qemuCgroupControllerActive(driver, i))
            continue;
        rc = virCgroupMoveTask(cgroup, cgroup_emulator, i);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("Unable to move tasks from domain cgroup to "
                                   "emulator cgroup in controller %d for %s"),
                                 i, vm->def->name);
            goto cleanup;
        }
    }

    if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
        if (!(cpumap = qemuPrepareCpumap(driver, nodemask)))
            goto cleanup;
        cpumask = cpumap;
    } else if (def->cputune.emulatorpin) {
        cpumask = def->cputune.emulatorpin->cpumask;
    } else if (def->cpumask) {
        cpumask = def->cpumask;
    }

    if (cpumask) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUSET)) {
            rc = qemuSetupCgroupEmulatorPin(cgroup_emulator, cpumask);
            if (rc < 0)
                goto cleanup;
        }
        cpumask = NULL; /* sanity */
    }

    if (period || quota) {
        if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
            if ((rc = qemuSetupCgroupVcpuBW(cgroup_emulator, period,
                                            quota)) < 0)
                goto cleanup;
        }
    }

    virCgroupFree(&cgroup_emulator);
    virCgroupFree(&cgroup);
    virBitmapFree(cpumap);
    return 0;

cleanup:
    virBitmapFree(cpumap);

    if (cgroup_emulator) {
        virCgroupRemove(cgroup_emulator);
        virCgroupFree(&cgroup_emulator);
    }

    if (cgroup) {
        virCgroupRemove(cgroup);
        virCgroupFree(&cgroup);
    }

    return rc;
}

int qemuRemoveCgroup(virQEMUDriverPtr driver,
                     virDomainObjPtr vm,
                     int quiet)
{
    virCgroupPtr cgroup;
    int rc;

    if (driver->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
    if (rc != 0) {
        if (!quiet)
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Unable to find cgroup for %s"),
                           vm->def->name);
        return rc;
    }

    rc = virCgroupRemove(cgroup);
    virCgroupFree(&cgroup);
    return rc;
}

int qemuAddToCgroup(virQEMUDriverPtr driver,
                    virDomainDefPtr def)
{
    virCgroupPtr cgroup = NULL;
    int ret = -1;
    int rc;

    if (driver->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupForDomain(driver->cgroup, def->name, &cgroup, 0);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("unable to find cgroup for domain %s"),
                             def->name);
        goto cleanup;
    }

    rc = virCgroupAddTask(cgroup, getpid());
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("unable to add domain %s task %d to cgroup"),
                             def->name, getpid());
        goto cleanup;
    }

    ret = 0;

cleanup:
    virCgroupFree(&cgroup);
    return ret;
}