提交 0f20ba62 编写于 作者: A Alexey Kardashevskiy 提交者: Alexander Graf

target-ppc: spapr: e500: fix to use cpu_dt_id

This makes use of @cpu_dt_id and related API in:
1. emulated XICS hypercall handlers as they receive fixed CPU indexes;
2. XICS-KVM to enable in-kernel XICS on right CPU;
3. device-tree renderer.

This removes @cpu_index fixup as @cpu_dt_id is used instead so QEMU monitor
can accept command-line CPU indexes again.

This changes kvm_arch_vcpu_id() to use ppc_get_vcpu_dt_id() as at the moment
KVM CPU id and device tree ID are calculated using the same algorithm.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Acked-by: Mike Day <ncmike@ncultra.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
上级 0ce470cd
...@@ -228,7 +228,7 @@ int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs) ...@@ -228,7 +228,7 @@ int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
encap.cap = KVM_CAP_IRQ_MPIC; encap.cap = KVM_CAP_IRQ_MPIC;
encap.args[0] = opp->fd; encap.args[0] = opp->fd;
encap.args[1] = cs->cpu_index; encap.args[1] = kvm_arch_vcpu_id(cs);
return kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap); return kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
} }
......
...@@ -33,6 +33,17 @@ ...@@ -33,6 +33,17 @@
#include "qemu/error-report.h" #include "qemu/error-report.h"
#include "qapi/visitor.h" #include "qapi/visitor.h"
/*
 * Translate a device-tree CPU id (as passed by the guest in XICS
 * hypercalls/RTAS) into QEMU's internal cpu_index.
 *
 * Returns the matching cpu_index, or -1 if no vCPU has that
 * device-tree id.
 */
static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    return cpu ? cpu->parent_obj.cpu_index : -1;
}
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu) void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{ {
CPUState *cs = CPU(cpu); CPUState *cs = CPU(cpu);
...@@ -659,7 +670,7 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr, ...@@ -659,7 +670,7 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr, static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args) target_ulong opcode, target_ulong *args)
{ {
target_ulong server = args[0]; target_ulong server = get_cpu_index_by_dt_id(args[0]);
target_ulong mfrr = args[1]; target_ulong mfrr = args[1];
if (server >= spapr->icp->nr_servers) { if (server >= spapr->icp->nr_servers) {
...@@ -728,7 +739,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr, ...@@ -728,7 +739,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
} }
nr = rtas_ld(args, 0); nr = rtas_ld(args, 0);
server = rtas_ld(args, 1); server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
priority = rtas_ld(args, 2); priority = rtas_ld(args, 2);
if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers) if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
......
...@@ -65,7 +65,7 @@ static void icp_get_kvm_state(ICPState *ss) ...@@ -65,7 +65,7 @@ static void icp_get_kvm_state(ICPState *ss)
ret = kvm_vcpu_ioctl(ss->cs, KVM_GET_ONE_REG, &reg); ret = kvm_vcpu_ioctl(ss->cs, KVM_GET_ONE_REG, &reg);
if (ret != 0) { if (ret != 0) {
error_report("Unable to retrieve KVM interrupt controller state" error_report("Unable to retrieve KVM interrupt controller state"
" for CPU %d: %s", ss->cs->cpu_index, strerror(errno)); " for CPU %ld: %s", kvm_arch_vcpu_id(ss->cs), strerror(errno));
exit(1); exit(1);
} }
...@@ -97,7 +97,7 @@ static int icp_set_kvm_state(ICPState *ss, int version_id) ...@@ -97,7 +97,7 @@ static int icp_set_kvm_state(ICPState *ss, int version_id)
ret = kvm_vcpu_ioctl(ss->cs, KVM_SET_ONE_REG, &reg); ret = kvm_vcpu_ioctl(ss->cs, KVM_SET_ONE_REG, &reg);
if (ret != 0) { if (ret != 0) {
error_report("Unable to restore KVM interrupt controller state (0x%" error_report("Unable to restore KVM interrupt controller state (0x%"
PRIx64 ") for CPU %d: %s", state, ss->cs->cpu_index, PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(ss->cs),
strerror(errno)); strerror(errno));
return ret; return ret;
} }
...@@ -325,15 +325,15 @@ static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu) ...@@ -325,15 +325,15 @@ static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
struct kvm_enable_cap xics_enable_cap = { struct kvm_enable_cap xics_enable_cap = {
.cap = KVM_CAP_IRQ_XICS, .cap = KVM_CAP_IRQ_XICS,
.flags = 0, .flags = 0,
.args = {icpkvm->kernel_xics_fd, cs->cpu_index, 0, 0}, .args = {icpkvm->kernel_xics_fd, kvm_arch_vcpu_id(cs), 0, 0},
}; };
ss->cs = cs; ss->cs = cs;
ret = kvm_vcpu_ioctl(ss->cs, KVM_ENABLE_CAP, &xics_enable_cap); ret = kvm_vcpu_ioctl(ss->cs, KVM_ENABLE_CAP, &xics_enable_cap);
if (ret < 0) { if (ret < 0) {
error_report("Unable to connect CPU%d to kernel XICS: %s", error_report("Unable to connect CPU%ld to kernel XICS: %s",
cs->cpu_index, strerror(errno)); kvm_arch_vcpu_id(cs), strerror(errno));
exit(1); exit(1);
} }
} }
......
...@@ -238,6 +238,7 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args, ...@@ -238,6 +238,7 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
the first node as boot node and be happy */ the first node as boot node and be happy */
for (i = smp_cpus - 1; i >= 0; i--) { for (i = smp_cpus - 1; i >= 0; i--) {
CPUState *cpu; CPUState *cpu;
PowerPCCPU *pcpu;
char cpu_name[128]; char cpu_name[128];
uint64_t cpu_release_addr = MPC8544_SPIN_BASE + (i * 0x20); uint64_t cpu_release_addr = MPC8544_SPIN_BASE + (i * 0x20);
...@@ -246,14 +247,16 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args, ...@@ -246,14 +247,16 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
continue; continue;
} }
env = cpu->env_ptr; env = cpu->env_ptr;
pcpu = POWERPC_CPU(cpu);
snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x", snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x",
cpu->cpu_index); ppc_get_vcpu_dt_id(pcpu));
qemu_fdt_add_subnode(fdt, cpu_name); qemu_fdt_add_subnode(fdt, cpu_name);
qemu_fdt_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq); qemu_fdt_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq);
qemu_fdt_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq); qemu_fdt_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq);
qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu"); qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
qemu_fdt_setprop_cell(fdt, cpu_name, "reg", cpu->cpu_index); qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
ppc_get_vcpu_dt_id(pcpu));
qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-line-size", qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-line-size",
env->dcache_line_size); env->dcache_line_size);
qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-line-size", qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-line-size",
......
...@@ -207,19 +207,20 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr) ...@@ -207,19 +207,20 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
CPU_FOREACH(cpu) { CPU_FOREACH(cpu) {
DeviceClass *dc = DEVICE_GET_CLASS(cpu); DeviceClass *dc = DEVICE_GET_CLASS(cpu);
int index = ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
uint32_t associativity[] = {cpu_to_be32(0x5), uint32_t associativity[] = {cpu_to_be32(0x5),
cpu_to_be32(0x0), cpu_to_be32(0x0),
cpu_to_be32(0x0), cpu_to_be32(0x0),
cpu_to_be32(0x0), cpu_to_be32(0x0),
cpu_to_be32(cpu->numa_node), cpu_to_be32(cpu->numa_node),
cpu_to_be32(cpu->cpu_index)}; cpu_to_be32(index)};
if ((cpu->cpu_index % smt) != 0) { if ((index % smt) != 0) {
continue; continue;
} }
snprintf(cpu_model, 32, "/cpus/%s@%x", dc->fw_name, snprintf(cpu_model, 32, "/cpus/%s@%x", dc->fw_name,
cpu->cpu_index); index);
offset = fdt_path_offset(fdt, cpu_model); offset = fdt_path_offset(fdt, cpu_model);
if (offset < 0) { if (offset < 0) {
...@@ -368,7 +369,7 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base, ...@@ -368,7 +369,7 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
CPUPPCState *env = &cpu->env; CPUPPCState *env = &cpu->env;
DeviceClass *dc = DEVICE_GET_CLASS(cs); DeviceClass *dc = DEVICE_GET_CLASS(cs);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
int index = cs->cpu_index; int index = ppc_get_vcpu_dt_id(cpu);
uint32_t servers_prop[smp_threads]; uint32_t servers_prop[smp_threads];
uint32_t gservers_prop[smp_threads * 2]; uint32_t gservers_prop[smp_threads * 2];
char *nodename; char *nodename;
......
...@@ -482,13 +482,13 @@ static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr, ...@@ -482,13 +482,13 @@ static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong vpa = args[2]; target_ulong vpa = args[2];
target_ulong ret = H_PARAMETER; target_ulong ret = H_PARAMETER;
CPUPPCState *tenv; CPUPPCState *tenv;
CPUState *tcpu; PowerPCCPU *tcpu;
tcpu = qemu_get_cpu(procno); tcpu = ppc_get_vcpu_by_dt_id(procno);
if (!tcpu) { if (!tcpu) {
return H_PARAMETER; return H_PARAMETER;
} }
tenv = tcpu->env_ptr; tenv = &tcpu->env;
switch (flags) { switch (flags) {
case FLAGS_REGISTER_VPA: case FLAGS_REGISTER_VPA:
......
...@@ -131,7 +131,7 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_, ...@@ -131,7 +131,7 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
uint32_t nret, target_ulong rets) uint32_t nret, target_ulong rets)
{ {
target_ulong id; target_ulong id;
CPUState *cpu; PowerPCCPU *cpu;
if (nargs != 1 || nret != 2) { if (nargs != 1 || nret != 2) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
...@@ -139,9 +139,9 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_, ...@@ -139,9 +139,9 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
} }
id = rtas_ld(args, 0); id = rtas_ld(args, 0);
cpu = qemu_get_cpu(id); cpu = ppc_get_vcpu_by_dt_id(id);
if (cpu != NULL) { if (cpu != NULL) {
if (cpu->halted) { if (CPU(cpu)->halted) {
rtas_st(rets, 1, 0); rtas_st(rets, 1, 0);
} else { } else {
rtas_st(rets, 1, 2); rtas_st(rets, 1, 2);
...@@ -161,7 +161,7 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr, ...@@ -161,7 +161,7 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
uint32_t nret, target_ulong rets) uint32_t nret, target_ulong rets)
{ {
target_ulong id, start, r3; target_ulong id, start, r3;
CPUState *cs; PowerPCCPU *cpu;
if (nargs != 3 || nret != 1) { if (nargs != 3 || nret != 1) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
...@@ -172,9 +172,9 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr, ...@@ -172,9 +172,9 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
start = rtas_ld(args, 1); start = rtas_ld(args, 1);
r3 = rtas_ld(args, 2); r3 = rtas_ld(args, 2);
cs = qemu_get_cpu(id); cpu = ppc_get_vcpu_by_dt_id(id);
if (cs != NULL) { if (cpu != NULL) {
PowerPCCPU *cpu = POWERPC_CPU(cs); CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env; CPUPPCState *env = &cpu->env;
if (!cs->halted) { if (!cs->halted) {
......
...@@ -402,7 +402,7 @@ static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu) ...@@ -402,7 +402,7 @@ static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
unsigned long kvm_arch_vcpu_id(CPUState *cpu) unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{ {
return cpu->cpu_index; return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
} }
int kvm_arch_init_vcpu(CPUState *cs) int kvm_arch_init_vcpu(CPUState *cs)
......
...@@ -8457,6 +8457,7 @@ static void ppc_cpu_initfn(Object *obj) ...@@ -8457,6 +8457,7 @@ static void ppc_cpu_initfn(Object *obj)
cs->env_ptr = env; cs->env_ptr = env;
cpu_exec_init(env); cpu_exec_init(env);
cpu->cpu_dt_id = cs->cpu_index;
env->msr_mask = pcc->msr_mask; env->msr_mask = pcc->msr_mask;
env->mmu_model = pcc->mmu_model; env->mmu_model = pcc->mmu_model;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册