Commit 807b22e7 authored by Christoffer Dall, committed by Yang Yingliang

KVM: arm/arm64: Factor out hypercall handling from PSCI code

mainline inclusion
from mainline-v5.8-rc5
commit 55009c6ed2d24fc0f5521ab2482f145d269389ea
category: feature
bugzilla: NA
CVE: NA

--------------------------------

We currently intertwine the KVM PSCI implementation with the general
dispatch of hypercall handling, which makes perfect sense because PSCI
is the only category of hypercalls we support.

However, as we are about to support additional hypercalls, factor out
this functionality into a separate hypercall handler file.
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
[steven.price@arm.com: rebased]
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 33ecbd18
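For orientation, here is a minimal sketch of the dispatch path that results from this split: the existing HVC exit handler forwards every guest hypercall to the new kvm_hvc_call_handler(), which answers the SMCCC queries itself and falls back to kvm_psci_call() for anything it does not recognise. The handle_hvc() body below is a simplified illustration of the pre-existing arm64 exit handler, not code introduced by this patch; only the diff that follows is part of the change.

/* Simplified illustration of the existing arm64 handle_hvc() exit handler;
 * this patch only changes which header it pulls the handler from. */
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret;

        /* Every guest HVC now funnels through the generic hypercall handler. */
        ret = kvm_hvc_call_handler(vcpu);
        if (ret < 0) {
                /* Unknown call: report SMCCC "not supported" back to the guest. */
                vcpu_set_reg(vcpu, 0, ~0UL);
                return 1;
        }

        return ret;
}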
@@ -24,7 +24,7 @@ obj-y += kvm-arm.o init.o interrupts.o
 obj-y += handle_exit.o guest.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
 obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
-obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
+obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
 obj-y += $(KVM)/arm/aarch32.o
 obj-y += $(KVM)/arm/vgic/vgic.o
......
@@ -21,7 +21,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_psci.h>
+#include <kvm/arm_hypercalls.h>
 #include <trace/events/kvm.h>
 #include "trace.h"
......
@@ -15,6 +15,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
 kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
......
@@ -22,8 +22,6 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
-#include <kvm/arm_psci.h>
 #include <asm/esr.h>
 #include <asm/exception.h>
 #include <asm/kvm_asm.h>
@@ -33,6 +31,8 @@
 #include <asm/debug-monitors.h>
 #include <asm/traps.h>
+#include <kvm/arm_hypercalls.h>
 #define CREATE_TRACE_POINTS
 #include "trace.h"
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019 Arm Ltd. */

#ifndef __KVM_ARM_HYPERCALLS_H
#define __KVM_ARM_HYPERCALLS_H

#include <asm/kvm_emulate.h>

int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);

static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 0);
}

static inline unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 1);
}

static inline unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 2);
}

static inline unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 3);
}

static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
                                    unsigned long a0,
                                    unsigned long a1,
                                    unsigned long a2,
                                    unsigned long a3)
{
        vcpu_set_reg(vcpu, 0, a0);
        vcpu_set_reg(vcpu, 1, a1);
        vcpu_set_reg(vcpu, 2, a2);
        vcpu_set_reg(vcpu, 3, a3);
}

#endif
@@ -51,7 +51,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
 }
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
 struct kvm_one_reg;
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
        u32 func_id = smccc_get_function(vcpu);
        u32 val = SMCCC_RET_NOT_SUPPORTED;
        u32 feature;

        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch(feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        if (kvm_arm_harden_branch_predictor())
                                val = SMCCC_RET_SUCCESS;
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (kvm_arm_have_ssbd()) {
                        case KVM_SSBD_FORCE_DISABLE:
                        case KVM_SSBD_UNKNOWN:
                                break;
                        case KVM_SSBD_KERNEL:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_SSBD_FORCE_ENABLE:
                        case KVM_SSBD_MITIGATED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                }
                break;
        default:
                return kvm_psci_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}
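To illustrate what the handler above services, a guest kernel typically probes these functions with the generic SMCCC 1.1 helpers from include/linux/arm-smccc.h. The helper name guest_has_workaround_1() below is hypothetical and only for illustration; arm_smccc_1_1_hvc() and the SMCCC constants are the standard kernel API, and the HVC conduit lands the request in kvm_hvc_call_handler() above.

#include <linux/arm-smccc.h>
#include <linux/types.h>

/* Hypothetical guest-side probe: ask the hypervisor whether the
 * ARM_SMCCC_ARCH_WORKAROUND_1 mitigation call is implemented. */
static bool guest_has_workaround_1(void)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        return res.a0 == SMCCC_RET_SUCCESS;
}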
@@ -26,6 +26,7 @@
 #include <asm/kvm_host.h>
 #include <kvm/arm_psci.h>
+#include <kvm/arm_hypercalls.h>
 /*
  * This is an implementation of the Power State Coordination Interface
@@ -34,38 +35,6 @@
 #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
-static u32 smccc_get_function(struct kvm_vcpu *vcpu)
-{
-        return vcpu_get_reg(vcpu, 0);
-}
-
-static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
-{
-        return vcpu_get_reg(vcpu, 1);
-}
-
-static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
-{
-        return vcpu_get_reg(vcpu, 2);
-}
-
-static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
-{
-        return vcpu_get_reg(vcpu, 3);
-}
-
-static void smccc_set_retval(struct kvm_vcpu *vcpu,
-                             unsigned long a0,
-                             unsigned long a1,
-                             unsigned long a2,
-                             unsigned long a3)
-{
-        vcpu_set_reg(vcpu, 0, a0);
-        vcpu_set_reg(vcpu, 1, a1);
-        vcpu_set_reg(vcpu, 2, a2);
-        vcpu_set_reg(vcpu, 3, a3);
-}
 static unsigned long psci_affinity_mask(unsigned long affinity_level)
 {
         if (affinity_level <= 3)
@@ -384,7 +353,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
  * Errors:
  * -EINVAL: Unrecognized PSCI function
  */
-static int kvm_psci_call(struct kvm_vcpu *vcpu)
+int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
         switch (kvm_psci_version(vcpu, vcpu->kvm)) {
         case KVM_ARM_PSCI_1_0:
@@ -398,47 +367,6 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
         };
 }
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
-{
-        u32 func_id = smccc_get_function(vcpu);
-        u32 val = SMCCC_RET_NOT_SUPPORTED;
-        u32 feature;
-
-        switch (func_id) {
-        case ARM_SMCCC_VERSION_FUNC_ID:
-                val = ARM_SMCCC_VERSION_1_1;
-                break;
-        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
-                feature = smccc_get_arg1(vcpu);
-                switch(feature) {
-                case ARM_SMCCC_ARCH_WORKAROUND_1:
-                        if (kvm_arm_harden_branch_predictor())
-                                val = SMCCC_RET_SUCCESS;
-                        break;
-                case ARM_SMCCC_ARCH_WORKAROUND_2:
-                        switch (kvm_arm_have_ssbd()) {
-                        case KVM_SSBD_FORCE_DISABLE:
-                        case KVM_SSBD_UNKNOWN:
-                                break;
-                        case KVM_SSBD_KERNEL:
-                                val = SMCCC_RET_SUCCESS;
-                                break;
-                        case KVM_SSBD_FORCE_ENABLE:
-                        case KVM_SSBD_MITIGATED:
-                                val = SMCCC_RET_NOT_REQUIRED;
-                                break;
-                        }
-                        break;
-                }
-                break;
-        default:
-                return kvm_psci_call(vcpu);
-        }
-
-        smccc_set_retval(vcpu, val, 0, 0, 0);
-        return 1;
-}
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
         return 1;               /* PSCI version */
......