Commit 46a1449d authored by Alexey Kardashevskiy, committed by Michael Ellerman

powerpc/powernv: Move npu struct from pnv_phb to pci_controller

The powernv PCI code stores NPU data in the pnv_phb struct. The latter
is referenced by pci_controller::private_data. We are going to have NPU2
support in the pseries platform as well but it does not store any
private_data in the pci_controller struct; and even if it did,
it would be a different data structure.

This makes npu a pointer and stores it one level higher in
the pci_controller struct.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent c10c21ef
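In short, the change replaces a two-step lookup through the powernv-private pnv_phb with a single dereference on the generic pci_controller. A minimal before/after sketch of the access path, using the variable names that appear in the diff below:

	/* Before: NPU state is embedded in the powernv-only pnv_phb,
	 * reached via pci_controller::private_data */
	struct pnv_phb *nphb = pci_bus_to_host(npdev->bus)->private_data;
	struct npu *npu = &nphb->npu;

	/* After: the npu descriptor hangs directly off the generic
	 * pci_controller, so a platform that keeps a different structure
	 * in private_data (e.g. pseries) can still reach it */
	struct npu *npu = pci_bus_to_host(npdev->bus)->npu;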
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -129,6 +129,7 @@ struct pci_controller {
 #endif	/* CONFIG_PPC64 */
 
 	void *private_data;
+	struct npu *npu;
 };
 
 /* These are used for config access before all the PCI probing
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -326,6 +326,25 @@ struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
 	return gpe;
 }
 
+/*
+ * NPU2 ATS
+ */
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+
+/* An NPU descriptor, valid for POWER9 only */
+struct npu {
+	int index;
+	__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
+	unsigned int mmio_atsd_count;
+
+	/* Bitmask for MMIO register usage */
+	unsigned long mmio_atsd_usage;
+
+	/* Do we need to explicitly flush the nest mmu? */
+	bool nmmu_flush;
+};
+
 /* Maximum number of nvlinks per npu */
 #define NV_MAX_LINKS 6
 
@@ -477,7 +496,6 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
 	int i, j;
 	struct npu *npu;
 	struct pci_dev *npdev;
-	struct pnv_phb *nphb;
 
 	for (i = 0; i <= max_npu2_index; i++) {
 		mmio_atsd_reg[i].reg = -1;
@@ -492,8 +510,7 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
 		if (!npdev)
 			continue;
 
-		nphb = pci_bus_to_host(npdev->bus)->private_data;
-		npu = &nphb->npu;
+		npu = pci_bus_to_host(npdev->bus)->npu;
 		mmio_atsd_reg[i].npu = npu;
 		mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
 		while (mmio_atsd_reg[i].reg < 0) {
@@ -661,6 +678,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct npu_context *npu_context;
+	struct pci_controller *hose;
 
 	/*
 	 * At present we don't support GPUs connected to multiple NPUs and I'm
@@ -688,8 +706,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	nphb = pci_bus_to_host(npdev->bus)->private_data;
-	npu = &nphb->npu;
+	hose = pci_bus_to_host(npdev->bus);
+	nphb = hose->private_data;
+	npu = hose->npu;
 
 	/*
 	 * Setup the NPU context table for a particular GPU. These need to be
@@ -763,7 +782,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	 */
 	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
 
-	if (!nphb->npu.nmmu_flush) {
+	if (!npu->nmmu_flush) {
 		/*
 		 * If we're not explicitly flushing ourselves we need to mark
 		 * the thread for global flushes
@@ -801,6 +820,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 	struct device_node *nvlink_dn;
 	u32 nvlink_index;
+	struct pci_controller *hose;
 
 	if (WARN_ON(!npdev))
 		return;
@@ -808,8 +828,9 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 	if (!firmware_has_feature(FW_FEATURE_OPAL))
 		return;
 
-	nphb = pci_bus_to_host(npdev->bus)->private_data;
-	npu = &nphb->npu;
+	hose = pci_bus_to_host(npdev->bus);
+	nphb = hose->private_data;
+	npu = hose->npu;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
@@ -887,9 +908,15 @@ int pnv_npu2_init(struct pnv_phb *phb)
 	struct pci_dev *gpdev;
 	static int npu_index;
 	uint64_t rc = 0;
+	struct pci_controller *hose = phb->hose;
+	struct npu *npu;
+	int ret;
+
+	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
+	if (!npu)
+		return -ENOMEM;
 
-	phb->npu.nmmu_flush =
-		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
+	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
 	for_each_child_of_node(phb->hose->dn, dn) {
 		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
 		if (gpdev) {
@@ -903,18 +930,29 @@ int pnv_npu2_init(struct pnv_phb *phb)
 		}
 	}
 
-	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
+	for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
 							i, &mmio_atsd); i++)
-		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+		npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
 
-	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
-	phb->npu.mmio_atsd_count = i;
-	phb->npu.mmio_atsd_usage = 0;
+	pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
+	npu->mmio_atsd_count = i;
+	npu->mmio_atsd_usage = 0;
 	npu_index++;
-	if (WARN_ON(npu_index >= NV_MAX_NPUS))
-		return -ENOSPC;
+	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
+		ret = -ENOSPC;
+		goto fail_exit;
+	}
 	max_npu2_index = npu_index;
-	phb->npu.index = npu_index;
+	npu->index = npu_index;
+	hose->npu = npu;
 
 	return 0;
+
+fail_exit:
+	for (i = 0; i < npu->mmio_atsd_count; ++i)
+		iounmap(npu->mmio_atsd_regs[i]);
+
+	kfree(npu);
+
+	return ret;
 }
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1278,7 +1278,7 @@ static void pnv_pci_ioda_setup_PEs(void)
 			pnv_ioda_reserve_pe(phb, 0);
 			pnv_ioda_setup_npu_PEs(hose->bus);
 			if (phb->model == PNV_PHB_MODEL_NPU2)
-				pnv_npu2_init(phb);
+				WARN_ON_ONCE(pnv_npu2_init(phb));
 		}
 		if (phb->type == PNV_PHB_NPU_OCAPI) {
 			bus = hose->bus;
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -8,9 +8,6 @@
 
 struct pci_dn;
 
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-
 enum pnv_phb_type {
 	PNV_PHB_IODA1 = 0,
 	PNV_PHB_IODA2 = 1,
@@ -174,19 +171,6 @@ struct pnv_phb {
 	unsigned int		diag_data_size;
 	u8			*diag_data;
 
-	/* Nvlink2 data */
-	struct npu {
-		int index;
-		__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
-		unsigned int mmio_atsd_count;
-
-		/* Bitmask for MMIO register usage */
-		unsigned long mmio_atsd_usage;
-
-		/* Do we need to explicitly flush the nest mmu? */
-		bool nmmu_flush;
-	} npu;
-
 	int p2p_target_count;
 };
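Read together, the last npu-dma.c hunk and the pci-ioda.c hunk also change the lifetime and error handling of the descriptor: pnv_npu2_init() now allocates npu with kzalloc() and publishes it via hose->npu only once every step has succeeded, and the caller warns on failure instead of ignoring the return value. Assembled from the hunks above (a reading aid, not a quote of the resulting file), the reworked tail of pnv_npu2_init() reads:

	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
		ret = -ENOSPC;
		goto fail_exit;
	}
	max_npu2_index = npu_index;
	npu->index = npu_index;
	hose->npu = npu;	/* publish only on full success */

	return 0;

fail_exit:
	/* undo the ioremap()s done while counting ATSD registers */
	for (i = 0; i < npu->mmio_atsd_count; ++i)
		iounmap(npu->mmio_atsd_regs[i]);
	kfree(npu);
	return ret;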