提交 202ccb6b 编写于 作者: D Dongjiu Geng 提交者: Peter Maydell

target/arm: Add support for VCPU event states

This patch extends the qemu-kvm state sync logic with support for
the KVM_GET/SET_VCPU_EVENTS ioctls, giving access to the previously
missing SError exception state. It also adds support for migrating
that exception state.

The SError exception state consists of a pending flag and an ESR value;
kvm_put/get_vcpu_events() will be called when the system registers are
set or read. During migration, if the source machine has an SError
pending, QEMU performs the migration regardless of whether the target
machine supports specifying a guest ESR value, because if the target
machine does not support that, it can still inject the SError with a
zero ESR value.
Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1538067351-23931-3-git-send-email-gengdongjiu@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
上级 61e9e3cb
......@@ -531,6 +531,13 @@ typedef struct CPUARMState {
*/
} exception;
/* Information associated with an SError */
struct {
uint8_t pending; /* nonzero if an SError exception is pending for the vcpu */
uint8_t has_esr; /* nonzero if 'esr' below carries a valid syndrome value */
uint64_t esr; /* syndrome (ESR) value to inject along with the SError */
} serror;
/* Thumb-2 EE state. */
uint32_t teecr;
uint32_t teehbr;
......
......@@ -34,6 +34,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
};
static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static ARMHostCPUFeatures arm_host_cpu_features;
......@@ -48,6 +49,12 @@ int kvm_arm_vcpu_init(CPUState *cs)
return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}
/*
 * kvm_arm_init_serror_injection:
 * @cs: CPUState
 *
 * Probe the host kernel for KVM_CAP_ARM_INJECT_SERROR_ESR and cache the
 * result, so that later SError injection knows whether a guest syndrome
 * (ESR) value can be specified.
 */
void kvm_arm_init_serror_injection(CPUState *cs)
{
    cap_has_inject_serror_esr =
        kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_INJECT_SERROR_ESR);
}
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
int *fdarray,
struct kvm_vcpu_init *init)
......@@ -522,6 +529,59 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
return 0;
}
/*
 * kvm_put_vcpu_events:
 * @cpu: ARMCPU
 *
 * Push QEMU's tracked SError state into the kernel via the
 * KVM_SET_VCPU_EVENTS ioctl.
 *
 * Returns: 0 on success (or if the kernel lacks vcpu-events support),
 * otherwise the negative error code from the ioctl.
 */
int kvm_put_vcpu_events(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
struct kvm_vcpu_events events;
int ret;
/* Nothing to do if the host kernel has no KVM_CAP_VCPU_EVENTS. */
if (!kvm_has_vcpu_events()) {
return 0;
}
/* Zero the whole struct (including any padding) before filling it in. */
memset(&events, 0, sizeof(events));
events.exception.serror_pending = env->serror.pending;
/*
 * Inject SError to guest with specified syndrome if host kernel
 * supports it, otherwise inject SError without syndrome.
 */
if (cap_has_inject_serror_esr) {
events.exception.serror_has_esr = env->serror.has_esr;
events.exception.serror_esr = env->serror.esr;
}
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
if (ret) {
error_report("failed to put vcpu events");
}
return ret;
}
/*
 * kvm_get_vcpu_events:
 * @cpu: ARMCPU
 *
 * Read the kernel's vcpu event state via KVM_GET_VCPU_EVENTS and copy
 * the SError fields into QEMU's CPUARMState so they can be migrated.
 *
 * Returns: 0 on success (or if the kernel lacks vcpu-events support),
 * otherwise the negative error code from the ioctl.
 */
int kvm_get_vcpu_events(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
struct kvm_vcpu_events events;
int ret;
/* Nothing to do if the host kernel has no KVM_CAP_VCPU_EVENTS. */
if (!kvm_has_vcpu_events()) {
return 0;
}
memset(&events, 0, sizeof(events));
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
if (ret) {
error_report("failed to get vcpu events");
return ret;
}
/* Mirror the kernel's SError state into QEMU's tracked copy. */
env->serror.pending = events.exception.serror_pending;
env->serror.has_esr = events.exception.serror_has_esr;
env->serror.esr = events.exception.serror_esr;
return 0;
}
/* Hook called before each KVM_RUN; no per-run setup is needed on ARM. */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
......
......@@ -217,6 +217,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;
/* Check whether userspace can specify guest syndrome value */
kvm_arm_init_serror_injection(cs);
return kvm_arm_init_cpreg_list(cpu);
}
......@@ -358,6 +361,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
ret = kvm_put_vcpu_events(cpu);
if (ret) {
return ret;
}
/* Note that we do not call write_cpustate_to_list()
* here, so we are only writing the tuple list back to
* KVM. This is safe because nothing can change the
......@@ -445,6 +453,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpscr(env, fpscr);
ret = kvm_get_vcpu_events(cpu);
if (ret) {
return ret;
}
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
......
......@@ -546,6 +546,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
kvm_arm_init_debug(cs);
/* Check whether user space can specify guest syndrome value */
kvm_arm_init_serror_injection(cs);
return kvm_arm_init_cpreg_list(cpu);
}
......@@ -727,6 +730,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
ret = kvm_put_vcpu_events(cpu);
if (ret) {
return ret;
}
if (!write_list_to_kvmstate(cpu, level)) {
return EINVAL;
}
......@@ -863,6 +871,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpcr(env, fpr);
ret = kvm_get_vcpu_events(cpu);
if (ret) {
return ret;
}
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
......
......@@ -121,6 +121,30 @@ bool write_kvmstate_to_list(ARMCPU *cpu);
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
/**
 * kvm_arm_init_serror_injection:
 * @cs: CPUState
 *
 * Check whether KVM can set guest SError syndrome.
 */
void kvm_arm_init_serror_injection(CPUState *cs);
/**
 * kvm_get_vcpu_events:
 * @cpu: ARMCPU
 *
 * Get VCPU related state from kvm.
 *
 * Returns: 0 on success, or a negative error code from the ioctl.
 */
int kvm_get_vcpu_events(ARMCPU *cpu);
/**
 * kvm_put_vcpu_events:
 * @cpu: ARMCPU
 *
 * Put VCPU related state to kvm.
 *
 * Returns: 0 on success, or a negative error code from the ioctl.
 */
int kvm_put_vcpu_events(ARMCPU *cpu);
#ifdef CONFIG_KVM
/**
* kvm_arm_create_scratch_host_vcpu:
......
......@@ -172,6 +172,27 @@ static const VMStateDescription vmstate_sve = {
};
#endif /* AARCH64 */
/*
 * Subsection predicate: include "cpu/serror" in the migration stream
 * only while an SError is recorded as pending.
 */
static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu->env.serror.pending != 0;
}
/*
 * Migration subsection for the SError pending state; only sent when
 * serror_needed() reports an SError pending.  NOTE: the field order
 * defines the migration wire format -- do not reorder.
 */
static const VMStateDescription vmstate_serror = {
.name = "cpu/serror",
.version_id = 1,
.minimum_version_id = 1,
.needed = serror_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(env.serror.pending, ARMCPU),
VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
VMSTATE_UINT64(env.serror.esr, ARMCPU),
VMSTATE_END_OF_LIST()
}
};
static bool m_needed(void *opaque)
{
ARMCPU *cpu = opaque;
......@@ -726,6 +747,7 @@ const VMStateDescription vmstate_arm_cpu = {
#ifdef TARGET_AARCH64
&vmstate_sve,
#endif
&vmstate_serror,
NULL
}
};
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册