/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
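
/*
 * This type is abstract; concrete backends such as memory-backend-ram and
 * memory-backend-file (see backends/hostmem-ram.c and hostmem-file.c) supply
 * the actual allocation.  A typical use, sketched here, pins the RAM of one
 * guest NUMA node to one host NUMA node:
 *
 *   -object memory-backend-ram,id=mem0,size=512M,policy=bind,host-nodes=0 \
 *   -numa node,nodeid=0,memdev=mem0
 */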
#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"

#ifdef CONFIG_NUMA
#include <numaif.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

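/* "size" property: the backend's RAM size in bytes.  It can only be changed
 * while the backend is still unallocated, i.e. before the complete() handler
 * below has created its memory region. */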
static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

static void
host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    Error *local_err = NULL;
    uint64_t value;

    if (memory_region_size(&backend->mr)) {
        error_setg(&local_err, "cannot change property value");
        goto out;
    }

    visit_type_size(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }
    if (!value) {
        error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
                   PRIu64 "'", object_get_typename(obj), name, value);
        goto out;
    }
    backend->size = value;
out:
    error_propagate(errp, local_err);
}

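/* Append one node id to a uint16List and return the address of the next
 * link, so the caller can keep chaining values. */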
static uint16List **host_memory_append_node(uint16List **node,
                                            unsigned long value)
{
    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    return &(*node)->next;
}

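/* "host-nodes" property getter: report the host_nodes bitmap as the
 * uint16List representation expected by the QAPI visitor. */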
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);

    node = host_memory_append_node(node, value);

    if (value == MAX_NODES) {
        goto out;
    }

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        node = host_memory_append_node(node, value);
    } while (true);

out:
    visit_type_uint16List(v, name, &host_nodes, errp);
}

static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l = NULL;

    visit_type_uint16List(v, name, &l, errp);

    while (l) {
        bitmap_set(backend->host_nodes, l->value, 1);
        l = l->next;
    }
#else
    error_setg(errp, "NUMA node binding are not supported by this QEMU");
#endif
}

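/* "policy" property: a HostMemPolicy value.  With CONFIG_NUMA the enum is
 * guaranteed to match the kernel's MPOL_* constants by the QEMU_BUILD_BUG_ON
 * checks above, so it can be handed to mbind() as-is. */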
static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}

static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}

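/* "merge" and "dump" properties: while the backend is still unallocated the
 * flag is merely recorded; once RAM exists, toggling it applies the
 * corresponding qemu_madvise() hint immediately. */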
static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!memory_region_size(&backend->mr)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!memory_region_size(&backend->mr)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}

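/* "prealloc" property.  force_prealloc mirrors the global -mem-prealloc
 * option and makes the getter report true; explicitly setting the property
 * to true on top of it is rejected.  Enabling prealloc on an already
 * allocated backend touches the memory right away via os_mem_prealloc(). */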
static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc || backend->force_prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    Error *local_err = NULL;
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (backend->force_prealloc) {
        if (value) {
            error_setg(errp,
                       "remove -mem-prealloc to use the prealloc property");
            return;
        }
    }

    if (!memory_region_size(&backend->mr)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        os_mem_prealloc(fd, ptr, sz, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        backend->prealloc = true;
    }
}

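/* Instance init: inherit the merge/dump defaults from the machine and the
 * prealloc default from -mem-prealloc, then register the user-visible QOM
 * properties. */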
static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->prealloc = mem_prealloc;

    object_property_add_bool(obj, "merge",
                        host_memory_backend_get_merge,
                        host_memory_backend_set_merge, NULL);
    object_property_add_bool(obj, "dump",
                        host_memory_backend_get_dump,
                        host_memory_backend_set_dump, NULL);
    object_property_add_bool(obj, "prealloc",
                        host_memory_backend_get_prealloc,
                        host_memory_backend_set_prealloc, NULL);
    object_property_add(obj, "size", "int",
                        host_memory_backend_get_size,
                        host_memory_backend_set_size, NULL, NULL, NULL);
    object_property_add(obj, "host-nodes", "int",
                        host_memory_backend_get_host_nodes,
                        host_memory_backend_set_host_nodes, NULL, NULL, NULL);
    object_property_add_enum(obj, "policy", "HostMemPolicy",
                             HostMemPolicy_lookup,
                             host_memory_backend_get_policy,
                             host_memory_backend_set_policy, NULL);
}

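/* Return the backend's memory region, or NULL if it has not been allocated
 * yet (i.e. the backend has not been completed). */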
MemoryRegion *
host_memory_backend_get_memory(HostMemoryBackend *backend, Error **errp)
{
    return memory_region_size(&backend->mr) ? &backend->mr : NULL;
}

void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped)
{
    backend->is_mapped = mapped;
}

bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
{
    return backend->is_mapped;
}

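/* UserCreatable::complete handler, called once all properties are set.
 * Ordering matters here: allocate through the subclass hook, apply the
 * madvise hints, bind with mbind(), and only then preallocate, so pages are
 * faulted in under the requested NUMA policy. */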
static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    Error *local_err = NULL;
    void *ptr;
    uint64_t sz;

    if (bc->alloc) {
        bc->alloc(backend, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = memory_region_get_ram_ptr(&backend->mr);
        sz = memory_region_size(&backend->mr);

        if (backend->merge) {
            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
        }
        if (!backend->dump) {
            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
        }
#ifdef CONFIG_NUMA
        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
        /* lastbit == MAX_NODES means maxnode = 0 */
        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
        /* ensure policy won't be ignored in case memory is preallocated
         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
         * this doesn't catch hugepage case. */
        unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;

        /* check for invalid host-nodes and policies and give more verbose
         * error messages than mbind(). */
        if (maxnode && backend->policy == MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be empty for policy default,"
                       " or you should explicitly specify a policy other"
                       " than default");
            return;
        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be set for policy %s",
                       HostMemPolicy_lookup[backend->policy]);
            return;
        }

        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
         * as argument to mbind() due to an old Linux bug (feature?) which
         * cuts off the last specified node. This means backend->host_nodes
         * must have MAX_NODES+1 bits available.
         */
        assert(sizeof(backend->host_nodes) >=
               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
        assert(maxnode <= MAX_NODES);
        if (mbind(ptr, sz, backend->policy,
                  maxnode ? backend->host_nodes : NULL, maxnode + 1, flags)) {
            if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
                error_setg_errno(errp, errno,
                                 "cannot bind memory to host NUMA nodes");
                return;
            }
        }
#endif
        /* Preallocate memory after the NUMA policy has been instantiated.
         * This is necessary to guarantee memory is allocated with
         * specified NUMA policy in place.
         */
        if (backend->prealloc) {
            os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz,
                            &local_err);
            if (local_err) {
                goto out;
            }
        }
    }
out:
    error_propagate(errp, local_err);
}

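/* Refuse object_del while the backend is mapped into a device or NUMA node;
 * host_memory_backend_set_mapped() tracks that state. */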
static bool
host_memory_backend_can_be_deleted(UserCreatable *uc, Error **errp)
{
    if (host_memory_backend_is_mapped(MEMORY_BACKEND(uc))) {
        return false;
    } else {
        return true;
    }
}

static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;
}

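/*
 * TYPE_MEMORY_BACKEND is abstract: a concrete backend supplies the
 * HostMemoryBackendClass::alloc hook from its own class_init, roughly as
 * sketched below (names are illustrative; see backends/hostmem-ram.c for a
 * real example):
 *
 *   static void my_backend_class_init(ObjectClass *oc, void *data)
 *   {
 *       HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);
 *
 *       bc->alloc = my_backend_memory_alloc;  // must initialize backend->mr
 *   }
 */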
static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);