/*
 * MSI-X device support
 *
 * This module provides support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 *  Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu/range.h"

#define MSIX_CAP_LENGTH 12

/* MSI-X enable bit and maskall bit are in byte 1 of the FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)
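/*
 * Worked example with the standard register values (PCI_MSIX_FLAGS_ENABLE is
 * 0x8000 and PCI_MSIX_FLAGS_MASKALL is 0x4000): shifting right by 8 yields
 * 0x80 and 0x40, the bit masks that apply to the single config byte at
 * dev->msix_cap + MSIX_CONTROL_OFFSET.
 */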

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    return fmask || dev->msix_table[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

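/*
 * React to a change in the effective mask state of a vector: fire the
 * use/release notifiers and, when the vector becomes unmasked, deliver any
 * message latched in the pending bit array.
 */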
static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) ||
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

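/*
 * MMIO accessors for the vector table mapped into the table BAR.  A guest
 * write may toggle an entry's mask bit, so mask-state changes are propagated
 * after the table bytes have been updated.
 */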
static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

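/* Mask every vector in the table, notifying users of each state change. */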
static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/* Initialize the MSI-X structures */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, cap_pos, MSIX_CAP_LENGTH);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, NULL, &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, NULL, &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
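
/*
 * Minimal usage sketch (hypothetical device: pci_dev and d->msix_bar are
 * placeholders, not names used in this file).  A device model calls this
 * from its init path, placing the table and PBA inside a BAR it registers
 * itself, e.g. eight vectors with the table at offset 0 and the PBA at
 * offset 0x800 of BAR 1:
 *
 *     int err = msix_init(pci_dev, 8, &d->msix_bar, 1, 0,
 *                         &d->msix_bar, 1, 0x800, 0);
 *     if (err) {
 *         // MSI-X unavailable: fall back to INTx or fail initialization
 *     }
 */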

int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr)
{
    int ret;
    char *name;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and PBA in
     * the upper half.  Do not use these elsewhere!
     */
#define MSIX_EXCLUSIVE_BAR_SIZE 4096
#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
#define MSIX_EXCLUSIVE_CAP_OFFSET 0

    if (nentries * PCI_MSIX_ENTRY_SIZE > MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
        return -EINVAL;
    }

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, NULL, name, MSIX_EXCLUSIVE_BAR_SIZE);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    MSIX_EXCLUSIVE_BAR_TABLE_OFFSET, &dev->msix_exclusive_bar,
                    bar_nr, MSIX_EXCLUSIVE_BAR_PBA_OFFSET,
                    MSIX_EXCLUSIVE_CAP_OFFSET);
    if (ret) {
        memory_region_destroy(&dev->msix_exclusive_bar);
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
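
/*
 * Usage sketch (pci_dev and n_vectors are illustrative placeholders): a
 * device that does not need to lay out the table and PBA itself can let
 * this helper own the whole 4K BAR:
 *
 *     if (msix_init_exclusive_bar(pci_dev, n_vectors, 1)) {
 *         // continue without MSI-X
 *     }
 */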

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    memory_region_destroy(&dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    memory_region_destroy(&dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
        memory_region_destroy(&dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, (n + 7) / 8);
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, (n + 7) / 8);
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    stl_le_phys(msg.address, msg.data);
}

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
            ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify a
 * standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is
 * going to actually use, and checking this on the notification path.
 * Devices that don't want to follow the spec suggestion can declare all
 * vectors as used. */
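
/*
 * Illustrative sketch (pci_dev, n_queues and i are placeholders): a device
 * with one vector per queue might claim them after a successful msix_init(),
 *
 *     for (i = 0; i < n_queues; i++) {
 *         msix_vector_use(pci_dev, i);
 *     }
 *
 * and release them again with msix_vector_unuse() or
 * msix_unuse_all_vectors() when the queues are torn down or reset.
 */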

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }
    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static void put_msix_state(QEMUFile *f, void *pv, size_t size)
{
    msix_save(pv, f);
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
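
/*
 * Sketch of how a device's vmstate typically pulls this in (MyDevState and
 * vmstate_mydev are illustrative; VMSTATE_MSIX() is assumed to be the helper
 * declared alongside this code in the MSI-X header):
 *
 *     static const VMStateDescription vmstate_mydev = {
 *         .name = "mydev",
 *         .fields = (VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, MyDevState),
 *             VMSTATE_MSIX(parent_obj, MyDevState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */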