xen_common.h 15.5 KB
Newer Older
1
#ifndef QEMU_HW_XEN_COMMON_H
2
#define QEMU_HW_XEN_COMMON_H
3

4 5 6 7 8 9 10 11 12
/*
 * If we have new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user supplied cflags might say. They
 * must be undefined before including xenctrl.h
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

13
#include <xenctrl.h>
14
#include <xenstore.h>
15 16
#include <xen/io/xenbus.h>

17
#include "hw/hw.h"
P
Paolo Bonzini 已提交
18
#include "hw/xen/xen.h"
19
#include "hw/pci/pci.h"
20
#include "qemu/queue.h"
21
#include "hw/xen/trace.h"
22

23 24
extern xc_interface *xen_xc;

25
/*
 * We don't support Xen prior to 4.2.0.
 */
28

29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72
/* Set the HVM memory type of 'nr' guest pages starting at 'first_pfn'. */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(xen_xc, domid, type, first_pfn, nr);
}

/* Assert/deassert ('level') the INTx line of the given PCI device in 'domid'. */
static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(xen_xc, domid, segment, bus, device,
                                     intx, level);
}

/* Route PCI INTx link 'link' to ISA IRQ 'irq' for the given domain. */
static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xc_hvm_set_pci_link_route(xen_xc, domid, link, irq);
}

/* Inject an MSI (address/data pair) into the guest via the hypervisor. */
static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xc_hvm_inject_msi(xen_xc, domid, msi_addr, msi_data);
}

/* Set the level of an ISA IRQ line for the given domain. */
static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xc_hvm_set_isa_irq_level(xen_xc, domid, irq, level);
}

/*
 * Enable dirty-page tracking for 'nr' VRAM pages starting at 'first_pfn';
 * the hypervisor fills 'bitmap' with one bit per dirtied page.
 */
static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xc_hvm_track_dirty_vram(xen_xc, domid, first_pfn, nr, bitmap);
}

/* Tell the hypervisor that 'nr' pages from 'first_pfn' have been modified. */
static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xc_hvm_modified_memory(xen_xc, domid, first_pfn, nr);
}

S
Stefan Weil 已提交
73
/* Xen 4.2 through 4.6 */
74
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
75

76
typedef xc_interface xenforeignmemory_handle;
77
typedef xc_evtchn xenevtchn_handle;
78
typedef xc_gnttab xengnttab_handle;
79

80 81 82 83 84 85 86 87
/*
 * Map the xenevtchn_* names onto the compat xc_evtchn_* API of older
 * libxenctrl.  The expansions must be plain expressions (no trailing
 * semicolon) so they compose in any statement or expression context;
 * the original xenevtchn_open expansion carried a stray ';'.
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
88

89 90 91 92 93 94 95
/* Map the xengnttab_* names onto the compat xc_gnttab_* API. */
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
J
Juergen Gross 已提交
96 97
/* Map 'c' grant refs that all belong to the same domain 'd'. */
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
98

99 100 101 102 103 104 105 106 107 108 109 110 111 112
/* Pre-4.7.1 libxenctrl has no separate foreignmemory handle; reuse xen_xc. */
#define xenforeignmemory_open(l, f) xen_xc

/*
 * Map 'pages' guest frames listed in 'arr' from domain 'dom'.  When the
 * caller supplies an 'err' array we use xc_map_foreign_bulk() so a
 * per-page error code is reported; otherwise xc_map_foreign_pages()
 * fails the whole mapping on any bad frame.
 */
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

/* Macro args parenthesized so e.g. "n + 1" pages unmaps the right length. */
#define xenforeignmemory_unmap(h, p, s) munmap((p), (s) * XC_PAGE_SIZE)
113

114 115 116 117 118 119
#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

120 121
#endif

122 123
extern xenforeignmemory_handle *xen_fmem;

124
void destroy_hvm_domain(bool reboot);
125

126 127 128
/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

129
#ifdef HVM_PARAM_VMPORT_REGS_PFN
130
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
S
Stefano Stabellini 已提交
131
                                          xen_pfn_t *vmport_regs_pfn)
132
{
S
Stefano Stabellini 已提交
133 134 135 136 137 138 139
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
140 141
}
#else
142
/*
 * These Xen headers do not define HVM_PARAM_VMPORT_REGS_PFN, so VMware
 * port register lookup is unavailable: always report -ENOSYS.
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

149 150 151 152 153 154 155 156 157
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

158
/*
 * Read the legacy HVM params describing the default ioreq server: the
 * synchronous and buffered ioreq page PFNs and the buffered-ioreq event
 * channel.  Returns 0 on success, or -1 (with a message on stderr) if
 * any parameter cannot be read.
 */
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long param;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }
    *ioreq_pfn = param;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }
    *bufioreq_pfn = param;

    if (xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                         &param) < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    *bufioreq_evtchn = param;

    return 0;
}

195 196 197 198 199 200 201 202 203
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

204
typedef uint16_t ioservid_t;
205

206
/*
 * Xen < 4.5 has no ioreq server API: QEMU always acts as the default
 * ioreq server, so there is nothing to register or unregister and all
 * of the section/device mapping hooks below are deliberate no-ops.
 */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

242
/* No dedicated ioreq servers before Xen 4.5: create/destroy are no-ops. */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

/* Only the default ioreq server exists, so always report its info. */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

/* Server state cannot be toggled pre-4.5; report success unconditionally. */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

273 274
static bool use_default_ioreq_server;

275
static inline void xen_map_memory_section(domid_t dom,
276 277 278 279 280 281 282
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

283 284 285 286
    if (use_default_ioreq_server) {
        return;
    }

287
    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
288
    xc_hvm_map_io_range_to_ioreq_server(xen_xc, dom, ioservid, 1,
289 290 291
                                        start_addr, end_addr);
}

292
static inline void xen_unmap_memory_section(domid_t dom,
293 294 295 296 297 298 299
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

300 301 302 303
    if (use_default_ioreq_server) {
        return;
    }

304
    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
305 306
    xc_hvm_unmap_io_range_from_ioreq_server(xen_xc, dom, ioservid,
                                            1, start_addr, end_addr);
307 308
}

309
static inline void xen_map_io_section(domid_t dom,
310 311 312 313 314 315 316
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

317 318 319 320
    if (use_default_ioreq_server) {
        return;
    }

321
    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
322
    xc_hvm_map_io_range_to_ioreq_server(xen_xc, dom, ioservid, 0,
323 324 325
                                        start_addr, end_addr);
}

326
static inline void xen_unmap_io_section(domid_t dom,
327 328 329 330 331 332 333
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

334 335 336 337
    if (use_default_ioreq_server) {
        return;
    }

338
    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
339 340
    xc_hvm_unmap_io_range_from_ioreq_server(xen_xc, dom, ioservid,
                                            0, start_addr, end_addr);
341 342
}

343
static inline void xen_map_pcidev(domid_t dom,
344 345 346
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
347 348 349 350
    if (use_default_ioreq_server) {
        return;
    }

351 352
    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
353 354
    xc_hvm_map_pcidev_to_ioreq_server(xen_xc, dom, ioservid, 0,
                                      pci_bus_num(pci_dev->bus),
355 356 357 358
                                      PCI_SLOT(pci_dev->devfn),
                                      PCI_FUNC(pci_dev->devfn));
}

359
static inline void xen_unmap_pcidev(domid_t dom,
360 361 362
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
363 364 365 366
    if (use_default_ioreq_server) {
        return;
    }

367 368
    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
369 370
    xc_hvm_unmap_pcidev_from_ioreq_server(xen_xc, dom, ioservid, 0,
                                          pci_bus_num(pci_dev->bus),
371 372 373 374
                                          PCI_SLOT(pci_dev->devfn),
                                          PCI_FUNC(pci_dev->devfn));
}

375
static inline void xen_create_ioreq_server(domid_t dom,
376
                                           ioservid_t *ioservid)
377
{
378 379
    int rc = xc_hvm_create_ioreq_server(xen_xc, dom,
                                        HVM_IOREQSRV_BUFIOREQ_ATOMIC,
380
                                        ioservid);
381 382 383

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
384
        return;
385 386
    }

387 388 389
    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
390 391
}

392
/* Tear down the dedicated ioreq server; no-op for the default server. */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xen_xc, dom, ioservid);
}

403
static inline int xen_get_ioreq_server_info(domid_t dom,
404 405 406 407 408
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
409
    if (use_default_ioreq_server) {
410
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
411 412 413 414
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

415
    return xc_hvm_get_ioreq_server_info(xen_xc, dom, ioservid,
416 417 418 419
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}

420
static inline int xen_set_ioreq_server_state(domid_t dom,
421 422 423
                                             ioservid_t ioservid,
                                             bool enable)
{
424 425 426 427
    if (use_default_ioreq_server) {
        return 0;
    }

428
    trace_xen_ioreq_server_state(ioservid, enable);
429 430
    return xc_hvm_set_ioreq_server_state(xen_xc, dom, ioservid,
                                         enable);
431 432 433 434
}

#endif

435
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
436
/* Pre-4.6 Xen: xc_domain_add_to_physmap() returns the error value directly. */
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
#else
444
/*
 * Xen >= 4.6: xc_domain_add_to_physmap() returns -1 with the real error
 * in errno; translate that back to the pre-4.6 convention of returning
 * the error value itself so callers see a uniform interface.
 */
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    /* In Xen 4.6 rc is -1 and errno contains the error value. */
    int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
    if (rc == -1) {
        return errno;
    }
    return rc;
}
#endif

457
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
458
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
459
/* Xen < 4.7: xc_domain_create() takes no domain-config argument. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
466
/* Xen >= 4.7: pass a NULL arch domain config to request the defaults. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
473
#endif
474

475 476 477 478 479 480 481 482 483 484 485 486 487 488
/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480


/* Placeholder type: the real segment struct only exists from Xen 4.8 on. */
typedef void *xengnttab_grant_copy_segment_t;

/* Grant copy is unavailable before Xen 4.8; callers must handle -ENOSYS. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif

489
#endif /* QEMU_HW_XEN_COMMON_H */