diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 0835e59bb2ef9a4c2d0da74f6da42b6d7a309659..ce7499c9ca214c8faa77c00ba8795e99f231bff9 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -486,30 +486,25 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
     AcpiSystemResourceAffinityTable *srat;
     AcpiSratProcessorGiccAffinity *core;
     AcpiSratMemoryAffinity *numamem;
-    int i, j, srat_start;
+    int i, srat_start;
     uint64_t mem_base;
-    uint32_t *cpu_node = g_malloc0(vms->smp_cpus * sizeof(uint32_t));
-
-    for (i = 0; i < vms->smp_cpus; i++) {
-        j = numa_get_node_for_cpu(i);
-        if (j < nb_numa_nodes) {
-            cpu_node[i] = j;
-        }
-    }
+    MachineClass *mc = MACHINE_GET_CLASS(vms);
+    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));
 
     srat_start = table_data->len;
     srat = acpi_data_push(table_data, sizeof(*srat));
     srat->reserved1 = cpu_to_le32(1);
 
-    for (i = 0; i < vms->smp_cpus; ++i) {
+    for (i = 0; i < cpu_list->len; ++i) {
+        int node_id = cpu_list->cpus[i].props.has_node_id ?
+                      cpu_list->cpus[i].props.node_id : 0;
         core = acpi_data_push(table_data, sizeof(*core));
         core->type = ACPI_SRAT_PROCESSOR_GICC;
         core->length = sizeof(*core);
-        core->proximity = cpu_to_le32(cpu_node[i]);
+        core->proximity = cpu_to_le32(node_id);
         core->acpi_processor_uid = cpu_to_le32(i);
         core->flags = cpu_to_le32(1);
     }
-    g_free(cpu_node);
 
     mem_base = vms->memmap[VIRT_MEM].base;
     for (i = 0; i < nb_numa_nodes; ++i) {
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 653b4d76fd228a6b7e6d6f7fb022e01d44b8829b..c7c8159dfd593f79cf3e5329994dce876797eaed 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -338,7 +338,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
 {
     int cpu;
     int addr_cells = 1;
-    unsigned int i;
+    const MachineState *ms = MACHINE(vms);
 
     /*
      * From Documentation/devicetree/bindings/arm/cpus.txt
@@ -369,6 +369,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
     for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) {
         char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
         ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
+        CPUState *cs = CPU(armcpu);
 
         qemu_fdt_add_subnode(vms->fdt, nodename);
         qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "cpu");
@@ -389,9 +390,9 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
                                   armcpu->mp_affinity);
         }
 
-        i = numa_get_node_for_cpu(cpu);
-        if (i < nb_numa_nodes) {
-            qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id", i);
+        if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
+            qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id",
+                ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
         }
 
         g_free(nodename);
@@ -1363,8 +1364,8 @@ static void machvirt_init(MachineState *machine)
         cs = CPU(cpuobj);
         cs->cpu_index = n;
 
-        node_id = numa_get_node_for_cpu(cs->cpu_index);
-        if (node_id == nb_numa_nodes) {
+        node_id = possible_cpus->cpus[cs->cpu_index].props.node_id;
+        if (!possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
             /* by default CPUState::numa_node was 0 if it's not set via CLI
              * keep it this way for now but in future we probably should
              * refuse to start up with incomplete numa mapping */