/*
 * QEMU sPAPR PCI host originated from Uninorth PCI host
 *
 * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
 * Copyright (C) 2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_host.h"
#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
#include "exec/address-spaces.h"
#include <libfdt.h>
#include "trace.h"
#include "qemu/error-report.h"

#include "hw/pci/pci_bus.h"

/* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
#define RTAS_QUERY_FN           0
#define RTAS_CHANGE_FN          1
#define RTAS_RESET_FN           2
#define RTAS_CHANGE_MSI_FN      3
#define RTAS_CHANGE_MSIX_FN     4

/* Interrupt types to return on RTAS_CHANGE_* */
#define RTAS_TYPE_MSI           1
#define RTAS_TYPE_MSIX          2

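/* Find the PHB with the given BUID, or NULL if none is registered */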
static sPAPRPHBState *find_phb(sPAPREnvironment *spapr, uint64_t buid)
{
    sPAPRPHBState *sphb;

    QLIST_FOREACH(sphb, &spapr->phbs, list) {
        if (sphb->buid != buid) {
            continue;
        }
        return sphb;
    }

    return NULL;
}

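/*
 * Find a PCI device by PHB BUID and RTAS config address
 * (bus number in bits 23:16, devfn in bits 15:8).
 */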
static PCIDevice *find_dev(sPAPREnvironment *spapr, uint64_t buid,
                           uint32_t config_addr)
{
    sPAPRPHBState *sphb = find_phb(spapr, buid);
    PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
    int bus_num = (config_addr >> 16) & 0xFF;
    int devfn = (config_addr >> 8) & 0xFF;

    if (!phb) {
        return NULL;
    }

    return pci_find_device(phb->bus, bus_num, devfn);
}

static uint32_t rtas_pci_cfgaddr(uint32_t arg)
{
    /* This handles the encoding of extended config space addresses */
    return ((arg >> 20) & 0xf00) | (arg & 0xff);
}

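/* Common tail of read-pci-config and ibm,read-pci-config: validate the
 * access, read config space and store status and value in the return area */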
static void finish_read_pci_config(sPAPREnvironment *spapr, uint64_t buid,
                                   uint32_t addr, uint32_t size,
                                   target_ulong rets)
{
    PCIDevice *pci_dev;
    uint32_t val;

    if ((size != 1) && (size != 2) && (size != 4)) {
        /* access must be 1, 2 or 4 bytes */
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    pci_dev = find_dev(spapr, buid, addr);
    addr = rtas_pci_cfgaddr(addr);

    if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
        /* Access must be to a valid device, within bounds and
         * naturally aligned */
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    val = pci_host_config_read_common(pci_dev, addr,
                                      pci_config_size(pci_dev), size);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, val);
}

static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                     uint32_t token, uint32_t nargs,
                                     target_ulong args,
                                     uint32_t nret, target_ulong rets)
{
    uint64_t buid;
    uint32_t size, addr;

    if ((nargs != 4) || (nret != 2)) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    size = rtas_ld(args, 3);
    addr = rtas_ld(args, 0);

    finish_read_pci_config(spapr, buid, addr, size, rets);
}

static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                 uint32_t token, uint32_t nargs,
                                 target_ulong args,
                                 uint32_t nret, target_ulong rets)
{
    uint32_t size, addr;

    if ((nargs != 2) || (nret != 2)) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    size = rtas_ld(args, 1);
    addr = rtas_ld(args, 0);

    finish_read_pci_config(spapr, 0, addr, size, rets);
}

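/* Common tail of write-pci-config and ibm,write-pci-config */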
static void finish_write_pci_config(sPAPREnvironment *spapr, uint64_t buid,
                                    uint32_t addr, uint32_t size,
                                    uint32_t val, target_ulong rets)
{
    PCIDevice *pci_dev;

    if ((size != 1) && (size != 2) && (size != 4)) {
        /* access must be 1, 2 or 4 bytes */
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    pci_dev = find_dev(spapr, buid, addr);
    addr = rtas_pci_cfgaddr(addr);

    if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
        /* Access must be to a valid device, within bounds and
         * naturally aligned */
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
                                 val, size);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                      uint32_t token, uint32_t nargs,
                                      target_ulong args,
                                      uint32_t nret, target_ulong rets)
{
    uint64_t buid;
    uint32_t val, size, addr;

    if ((nargs != 5) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    val = rtas_ld(args, 4);
    size = rtas_ld(args, 3);
    addr = rtas_ld(args, 0);

    finish_write_pci_config(spapr, buid, addr, size, val, rets);
}

static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  uint32_t token, uint32_t nargs,
                                  target_ulong args,
                                  uint32_t nret, target_ulong rets)
{
    uint32_t val, size, addr;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }


    val = rtas_ld(args, 2);
    size = rtas_ld(args, 1);
    addr = rtas_ld(args, 0);

    finish_write_pci_config(spapr, 0, addr, size, val, rets);
}

/*
 * Set MSI/MSIX message data.
 * This is required for msi_notify()/msix_notify() which
 * will write at the addresses via spapr_msi_write().
 *
 * If addr == 0, all entries will have .data == first_irq, i.e. the
 * table will be reset.
 */
static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
                             unsigned first_irq, unsigned req_num)
{
    unsigned i;
    MSIMessage msg = { .address = addr, .data = first_irq };

    if (!msix) {
        msi_set_message(pdev, msg);
        trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
        return;
    }

    for (i = 0; i < req_num; ++i) {
        msix_set_message(pdev, i, msg);
        trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
        if (addr) {
            ++msg.data;
        }
    }
}

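/*
 * ibm,change-msi: enable, reconfigure or (req_num == 0) release MSI/MSI-X
 * vectors for a device; allocated IRQs are cached in phb->msi, keyed by the
 * config address.
 */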
static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                uint32_t token, uint32_t nargs,
                                target_ulong args, uint32_t nret,
                                target_ulong rets)
{
    uint32_t config_addr = rtas_ld(args, 0);
    uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    unsigned int func = rtas_ld(args, 3);
    unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
    unsigned int seq_num = rtas_ld(args, 5);
    unsigned int ret_intr_type;
    unsigned int irq, max_irqs = 0, num = 0;
    sPAPRPHBState *phb = NULL;
    PCIDevice *pdev = NULL;
    spapr_pci_msi *msi;
    int *config_addr_key;

    switch (func) {
    case RTAS_CHANGE_MSI_FN:
    case RTAS_CHANGE_FN:
        ret_intr_type = RTAS_TYPE_MSI;
        break;
    case RTAS_CHANGE_MSIX_FN:
        ret_intr_type = RTAS_TYPE_MSIX;
        break;
    default:
        error_report("rtas_ibm_change_msi(%u) is not implemented", func);
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    /* Find sPAPRPHBState */
    phb = find_phb(spapr, buid);
    if (phb) {
        pdev = find_dev(spapr, buid, config_addr);
    }
    if (!phb || !pdev) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    /* Releasing MSIs */
    if (!req_num) {
        msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
        if (!msi) {
            trace_spapr_pci_msi("Releasing wrong config", config_addr);
            rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
            return;
        }

        xics_free(spapr->icp, msi->first_irq, msi->num);
        if (msi_present(pdev)) {
            spapr_msi_setmsg(pdev, 0, false, 0, num);
        }
        if (msix_present(pdev)) {
            spapr_msi_setmsg(pdev, 0, true, 0, num);
        }
        g_hash_table_remove(phb->msi, &config_addr);

        trace_spapr_pci_msi("Released MSIs", config_addr);
        rtas_st(rets, 0, RTAS_OUT_SUCCESS);
        rtas_st(rets, 1, 0);
        return;
    }

    /* Enabling MSI */

    /* Check if the device supports as many IRQs as requested */
    if (ret_intr_type == RTAS_TYPE_MSI) {
        max_irqs = msi_nr_vectors_allocated(pdev);
    } else if (ret_intr_type == RTAS_TYPE_MSIX) {
        max_irqs = pdev->msix_entries_nr;
    }
    if (!max_irqs) {
        error_report("Requested interrupt type %d is not enabled for device %x",
                     ret_intr_type, config_addr);
        rtas_st(rets, 0, -1); /* Hardware error */
        return;
    }
    /* Correct the number if the guest asked for too many */
    if (req_num > max_irqs) {
        trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
        req_num = max_irqs;
        irq = 0; /* to avoid misleading trace */
        goto out;
    }

    /* Allocate MSIs */
    irq = xics_alloc_block(spapr->icp, 0, req_num, false,
                           ret_intr_type == RTAS_TYPE_MSI);
    if (!irq) {
        error_report("Cannot allocate MSIs for device %x", config_addr);
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
    spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
                     irq, req_num);

    /* Add MSI device to cache */
    msi = g_new(spapr_pci_msi, 1);
    msi->first_irq = irq;
    msi->num = req_num;
    config_addr_key = g_new(int, 1);
    *config_addr_key = config_addr;
    g_hash_table_insert(phb->msi, config_addr_key, msi);

out:
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, req_num);
    rtas_st(rets, 2, ++seq_num);
    rtas_st(rets, 3, ret_intr_type);

    trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
}

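/*
 * ibm,query-interrupt-source-number: translate a device's MSI vector index
 * into the global interrupt source number allocated by ibm,change-msi.
 */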
static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
                                                   sPAPREnvironment *spapr,
                                                   uint32_t token,
                                                   uint32_t nargs,
                                                   target_ulong args,
                                                   uint32_t nret,
                                                   target_ulong rets)
{
    uint32_t config_addr = rtas_ld(args, 0);
    uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
    sPAPRPHBState *phb = NULL;
    PCIDevice *pdev = NULL;
    spapr_pci_msi *msi;

    /* Find sPAPRPHBState */
    phb = find_phb(spapr, buid);
    if (phb) {
        pdev = find_dev(spapr, buid, config_addr);
    }
    if (!phb || !pdev) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    /* Find device descriptor and start IRQ */
    msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
    if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
        trace_spapr_pci_msi("Failed to return vector", config_addr);
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }
    intr_src_num = msi->first_irq + ioa_intr_num;
    trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
                                                           intr_src_num);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, intr_src_num);
    rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
}

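/* Standard PCI swizzle: (slot + pin) modulo the number of INTx pins */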
static int pci_spapr_swizzle(int slot, int pin)
{
    return (slot + pin) % PCI_NUM_PINS;
}

static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /*
     * Here we need to convert pci_dev + irq_num to some unique value
     * which is less than number of IRQs on the specific bus (4).  We
     * use standard PCI swizzling, that is (slot number + pin number)
     * % 4.
     */
    return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
}

static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
{
    /*
     * Here we use the number returned by pci_spapr_map_irq to find a
     * corresponding qemu_irq.
     */
    sPAPRPHBState *phb = opaque;

    trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
    qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
}

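/* INTx routing is fixed: pin N always maps to the PHB's Nth LSI */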
static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
{
    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
    PCIINTxRoute route;

    route.mode = PCI_INTX_ENABLED;
    route.irq = sphb->lsi_table[pin].irq;

    return route;
}

/*
 * MSI/MSIX memory region implementation.
 * The handler handles both MSI and MSIX.
 * For MSI-X, the vector number is encoded as a part of the address,
 * data is set to 0.
 * For MSI, the vector number is encoded in least bits in data.
 */
static void spapr_msi_write(void *opaque, hwaddr addr,
                            uint64_t data, unsigned size)
{
    uint32_t irq = data;

    trace_spapr_pci_msi_write(addr, data, irq);

    qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
}

static const MemoryRegionOps spapr_msi_ops = {
    /* There is no .read as the read result is undefined by PCI spec */
    .read = NULL,
    .write = spapr_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN
};

/*
 * PHB PCI device
 */
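/* All devices behind the PHB share a single IOMMU DMA address space */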
static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    sPAPRPHBState *phb = opaque;

    return &phb->iommu_as;
}

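/*
 * Realize the PHB: set up the MMIO and IO windows, register the PCI bus,
 * create the IOMMU root and MSI windows, and allocate one LSI per INTx pin.
 */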
static void spapr_phb_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *s = SYS_BUS_DEVICE(dev);
    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
    PCIHostState *phb = PCI_HOST_BRIDGE(s);
    sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s);
    char *namebuf;
    int i;
    PCIBus *bus;
    uint64_t msi_window_size = 4096;

    if (sphb->index != -1) {
        hwaddr windows_base;

        if ((sphb->buid != -1) || (sphb->dma_liobn != -1)
            || (sphb->mem_win_addr != -1)
            || (sphb->io_win_addr != -1)) {
            error_setg(errp, "Either \"index\" or other parameters must"
                       " be specified for PAPR PHB, not both");
            return;
        }

        if (sphb->index > SPAPR_PCI_MAX_INDEX) {
            error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
                       SPAPR_PCI_MAX_INDEX);
            return;
        }

        sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
        sphb->dma_liobn = SPAPR_PCI_BASE_LIOBN + sphb->index;

        windows_base = SPAPR_PCI_WINDOW_BASE
            + sphb->index * SPAPR_PCI_WINDOW_SPACING;
        sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
        sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
    }

    if (sphb->buid == -1) {
        error_setg(errp, "BUID not specified for PHB");
        return;
    }

    if (sphb->dma_liobn == -1) {
        error_setg(errp, "LIOBN not specified for PHB");
        return;
    }

    if (sphb->mem_win_addr == -1) {
        error_setg(errp, "Memory window address not specified for PHB");
        return;
    }

    if (sphb->io_win_addr == -1) {
        error_setg(errp, "IO window address not specified for PHB");
        return;
    }

    if (find_phb(spapr, sphb->buid)) {
        error_setg(errp, "PCI host bridges must have unique BUIDs");
        return;
    }

    sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);

    namebuf = alloca(strlen(sphb->dtbusname) + 32);

    /* Initialize memory regions */
    sprintf(namebuf, "%s.mmio", sphb->dtbusname);
    memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);

    sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
    memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
                             namebuf, &sphb->memspace,
                             SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
    memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
                                &sphb->memwindow);

    /* Initialize IO regions */
    sprintf(namebuf, "%s.io", sphb->dtbusname);
    memory_region_init(&sphb->iospace, OBJECT(sphb),
                       namebuf, SPAPR_PCI_IO_WIN_SIZE);

    sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
    memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
                             &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
    memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
                                &sphb->iowindow);

    bus = pci_register_bus(dev, NULL,
                           pci_spapr_set_irq, pci_spapr_map_irq, sphb,
                           &sphb->memspace, &sphb->iospace,
                           PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
    phb->bus = bus;

    /*
     * Initialize PHB address space.
     * By default there will be at least one subregion for default
     * 32bit DMA window.
     * Later the guest might want to create another DMA window
     * which will become another memory subregion.
     */
    sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);

    memory_region_init(&sphb->iommu_root, OBJECT(sphb),
                       namebuf, UINT64_MAX);
    address_space_init(&sphb->iommu_as, &sphb->iommu_root,
                       sphb->dtbusname);

    /*
     * As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors,
     * we need to allocate some memory to catch those writes coming
     * from msi_notify()/msix_notify().
     * As MSIMessage:addr is going to be the same and MSIMessage:data
     * is going to be a VIRQ number, 4 bytes of the MSI MR will only
     * be used.
     *
     * For KVM we want to ensure that this memory is a full page so that
     * our memory slot is of page size granularity.
     */
#ifdef CONFIG_KVM
    if (kvm_enabled()) {
        msi_window_size = getpagesize();
    }
#endif

    memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
                          "msi", msi_window_size);
    memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
                                &sphb->msiwindow);

    pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);

    pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);

    QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);

    /* Initialize the LSI table */
    for (i = 0; i < PCI_NUM_PINS; i++) {
        uint32_t irq;

        irq = xics_alloc_block(spapr->icp, 0, 1, true, false);
        if (!irq) {
            error_setg(errp, "spapr_allocate_lsi failed");
            return;
        }

        sphb->lsi_table[i].irq = irq;
    }

    if (!info->finish_realize) {
        error_setg(errp, "finish_realize not defined");
        return;
    }

    info->finish_realize(sphb, errp);

    sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
}

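/* Default finish_realize: create the 32bit DMA window (1GB, TCE-backed) at
 * bus address 0 */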
static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
    sPAPRTCETable *tcet;

    tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
                               0,
                               SPAPR_TCE_PAGE_SHIFT,
                               0x40000000 >> SPAPR_TCE_PAGE_SHIFT, false);
    if (!tcet) {
        error_setg(errp, "Unable to create TCE table for %s",
                   sphb->dtbusname);
        return;
    }

    /* Register default 32bit DMA window */
    memory_region_add_subregion(&sphb->iommu_root, 0,
                                spapr_tce_get_iommu(tcet));
}

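/* Reset helper: propagate reset to every child device (e.g. the TCE table) */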
static int spapr_phb_children_reset(Object *child, void *opaque)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);

    if (dev) {
        device_reset(dev);
    }

    return 0;
}

static void spapr_phb_reset(DeviceState *qdev)
{
    /* Reset the IOMMU state */
    object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
}

static Property spapr_phb_properties[] = {
    DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
    DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
    DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
    DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
    DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
                       SPAPR_PCI_MMIO_WIN_SIZE),
    DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
    DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
                       SPAPR_PCI_IO_WIN_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_spapr_pci_lsi = {
    .name = "spapr_pci/lsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),

        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_pci_msi = {
    .name = "spapr_pci/msi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(key, spapr_pci_msi_mig),
        VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
        VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
        VMSTATE_END_OF_LIST()
    },
};

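/*
 * The MSI configuration lives in a GHashTable which cannot be migrated
 * directly: pre_save flattens it into the msi_devs array and post_load
 * rebuilds the hash table from that array.
 */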
static void spapr_pci_fill_msi_devs(gpointer key, gpointer value,
                                    gpointer opaque)
{
    sPAPRPHBState *sphb = opaque;

    sphb->msi_devs[sphb->msi_devs_num].key = *(uint32_t *)key;
    sphb->msi_devs[sphb->msi_devs_num].value = *(spapr_pci_msi *)value;
    sphb->msi_devs_num++;
}

static void spapr_pci_pre_save(void *opaque)
{
    sPAPRPHBState *sphb = opaque;
    int msi_devs_num;

    if (sphb->msi_devs) {
        g_free(sphb->msi_devs);
        sphb->msi_devs = NULL;
    }
    sphb->msi_devs_num = 0;
    msi_devs_num = g_hash_table_size(sphb->msi);
    if (!msi_devs_num) {
        return;
    }
    sphb->msi_devs = g_malloc(msi_devs_num * sizeof(spapr_pci_msi_mig));

    g_hash_table_foreach(sphb->msi, spapr_pci_fill_msi_devs, sphb);
    assert(sphb->msi_devs_num == msi_devs_num);
}

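/* Rebuild phb->msi from the migrated msi_devs array */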
static int spapr_pci_post_load(void *opaque, int version_id)
{
    sPAPRPHBState *sphb = opaque;
    gpointer key, value;
    int i;

    for (i = 0; i < sphb->msi_devs_num; ++i) {
        key = g_memdup(&sphb->msi_devs[i].key,
                       sizeof(sphb->msi_devs[i].key));
        value = g_memdup(&sphb->msi_devs[i].value,
                         sizeof(sphb->msi_devs[i].value));
        g_hash_table_insert(sphb->msi, key, value);
    }
    if (sphb->msi_devs) {
        g_free(sphb->msi_devs);
        sphb->msi_devs = NULL;
    }
    sphb->msi_devs_num = 0;

    return 0;
}

static const VMStateDescription vmstate_spapr_pci = {
    .name = "spapr_pci",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_pci_pre_save,
    .post_load = spapr_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
        VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
        VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
        VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
                             vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
        VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
        VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
                                    vmstate_spapr_pci_msi, spapr_pci_msi_mig),
        VMSTATE_END_OF_LIST()
    },
};

static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
                                           PCIBus *rootbus)
{
    sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);

    return sphb->dtbusname;
}

static void spapr_phb_class_init(ObjectClass *klass, void *data)
{
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);
    sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);

    hc->root_bus_path = spapr_phb_root_bus_path;
    dc->realize = spapr_phb_realize;
    dc->props = spapr_phb_properties;
    dc->reset = spapr_phb_reset;
    dc->vmsd = &vmstate_spapr_pci;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->cannot_instantiate_with_device_add_yet = false;
    spc->finish_realize = spapr_phb_finish_realize;
}

static const TypeInfo spapr_phb_info = {
    .name          = TYPE_SPAPR_PCI_HOST_BRIDGE,
    .parent        = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(sPAPRPHBState),
    .class_init    = spapr_phb_class_init,
    .class_size    = sizeof(sPAPRPHBClass),
};

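/* Create and realize a PHB with the given index; the remaining properties
 * (BUID, LIOBN, window addresses) are derived from the index at realize time */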
PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index)
{
    DeviceState *dev;

    dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
    qdev_prop_set_uint32(dev, "index", index);
    qdev_init_nofail(dev);

    return PCI_HOST_BRIDGE(dev);
}

/* Macros to operate with address in OF binding to PCI */
#define b_x(x, p, l)    (((x) & ((1<<(l))-1)) << (p))
#define b_n(x)          b_x((x), 31, 1) /* 0 if relocatable */
#define b_p(x)          b_x((x), 30, 1) /* 1 if prefetchable */
#define b_t(x)          b_x((x), 29, 1) /* 1 if the address is aliased */
#define b_ss(x)         b_x((x), 24, 2) /* the space code */
#define b_bbbbbbbb(x)   b_x((x), 16, 8) /* bus number */
#define b_ddddd(x)      b_x((x), 11, 5) /* device number */
#define b_fff(x)        b_x((x), 8, 3)  /* function number */
#define b_rrrrrrrr(x)   b_x((x), 0, 8)  /* register number */

typedef struct sPAPRTCEDT {
    void *fdt;
    int node_off;
} sPAPRTCEDT;

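/* Emit the "ibm,dma-window" property for the PHB's first TCE table child */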
static int spapr_phb_children_dt(Object *child, void *opaque)
{
    sPAPRTCEDT *p = opaque;
    sPAPRTCETable *tcet;

    tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);
    if (!tcet) {
        return 0;
    }

    spapr_dma_dt(p->fdt, p->node_off, "ibm,dma-window",
                 tcet->liobn, tcet->bus_offset,
                 tcet->nb_table << tcet->page_shift);
    /* Stop after the first window */

    return 1;
}

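/*
 * Build the device tree node for the PHB: bus range, "ranges" (IO and
 * memory windows), reg, the interrupt map and the DMA window.
 */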
int spapr_populate_pci_dt(sPAPRPHBState *phb,
                          uint32_t xics_phandle,
                          void *fdt)
{
    int bus_off, i, j;
    char nodename[256];
    uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
    struct {
        uint32_t hi;
        uint64_t child;
        uint64_t parent;
        uint64_t size;
    } QEMU_PACKED ranges[] = {
        {
            cpu_to_be32(b_ss(1)), cpu_to_be64(0),
            cpu_to_be64(phb->io_win_addr),
            cpu_to_be64(memory_region_size(&phb->iospace)),
        },
        {
            cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
            cpu_to_be64(phb->mem_win_addr),
            cpu_to_be64(memory_region_size(&phb->memwindow)),
        },
    };
    uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
    uint32_t interrupt_map_mask[] = {
        cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
    uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];

    /* Start populating the FDT */
    sprintf(nodename, "pci@%" PRIx64, phb->buid);
    bus_off = fdt_add_subnode(fdt, 0, nodename);
    if (bus_off < 0) {
        return bus_off;
    }

#define _FDT(exp) \
    do { \
        int ret = (exp);                                           \
        if (ret < 0) {                                             \
            return ret;                                            \
        }                                                          \
    } while (0)

    /* Write PHB properties */
    _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
    _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
    _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
    _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
    _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
    _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
    _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
    _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof(ranges)));
    _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));

    /* Build the interrupt-map, this must match what is done
     * in pci_spapr_map_irq
     */
    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
                     &interrupt_map_mask, sizeof(interrupt_map_mask)));
    for (i = 0; i < PCI_SLOT_MAX; i++) {
        for (j = 0; j < PCI_NUM_PINS; j++) {
            uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
            int lsi_num = pci_spapr_swizzle(i, j);

            irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
            irqmap[1] = 0;
            irqmap[2] = 0;
            irqmap[3] = cpu_to_be32(j+1);
            irqmap[4] = cpu_to_be32(xics_phandle);
936
            irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
937 938
            irqmap[6] = cpu_to_be32(0x8);
        }
939 940 941
    }
    /* Write interrupt map */
    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
                     sizeof(interrupt_map)));

    object_child_foreach(OBJECT(phb), spapr_phb_children_dt,
                         &((sPAPRTCEDT){ .fdt = fdt, .node_off = bus_off }));

    return 0;
}

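/* Register the PCI RTAS calls; the MSI-related calls are only registered
 * when MSI support is enabled */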
void spapr_pci_rtas_init(void)
{
    spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
                        rtas_read_pci_config);
    spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
                        rtas_write_pci_config);
    spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
                        rtas_ibm_read_pci_config);
    spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
                        rtas_ibm_write_pci_config);
    if (msi_supported) {
        spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
                            "ibm,query-interrupt-source-number",
                            rtas_ibm_query_interrupt_source_number);
        spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
                            rtas_ibm_change_msi);
    }
}

static void spapr_pci_register_types(void)
{
    type_register_static(&spapr_phb_info);
}

type_init(spapr_pci_register_types)