提交 dae7d6cc 编写于 作者: J Jingyi Wang 提交者: Zheng Zengkai

KVM: arm64: Make use of TWED feature

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I40FGG
CVE: NA

-----------------------------

For HCR_EL2, TWEDEn(bit[59]) decides whether TWED is enabled, and
when the configurable delay is enabled, TWEDEL (bits[63:60]) encodes
the minimum delay in taking a trap of WFE caused by the TWE bit in
this register as 2^(TWEDEL + 8) cycles.

We use two kernel parameters "twed_enable" and "twedel" to configure
the register.
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Signed-off-by: Jingyi Wang <wangjingyi11@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 6be796b7
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <asm/types.h> #include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */ /* Hyp Configuration Register (HCR) bits */
#define HCR_TWEDEN (UL(1) << 59) /* TWE Delay Enable: delayed WFE trapping via TWEDEL */
#define HCR_ATA (UL(1) << 56) #define HCR_ATA (UL(1) << 56)
#define HCR_FWB (UL(1) << 46) #define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41) #define HCR_API (UL(1) << 41)
...@@ -56,6 +57,13 @@ ...@@ -56,6 +57,13 @@
#define HCR_SWIO (UL(1) << 1) #define HCR_SWIO (UL(1) << 1)
#define HCR_VM (UL(1) << 0) #define HCR_VM (UL(1) << 0)
#ifdef CONFIG_ARM64_TWED
/*
 * HCR_EL2.TWEDEL, bits [63:60]: when TWEDEn is set, the minimum delay
 * before a trapped WFE is taken is 2^(TWEDEL + 8) cycles.
 */
#define HCR_TWEDEL_SHIFT 60
#define HCR_TWEDEL_MAX (UL(0xf))
#define HCR_TWEDEL_MASK (HCR_TWEDEL_MAX << HCR_TWEDEL_SHIFT)
#define HCR_TWEDEL (UL(1) << HCR_TWEDEL_SHIFT)
#endif
/* /*
* The bits we set in HCR: * The bits we set in HCR:
* TLOR: Trap LORegion register accesses * TLOR: Trap LORegion register accesses
......
...@@ -102,6 +102,33 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu) ...@@ -102,6 +102,33 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TWI; vcpu->arch.hcr_el2 |= HCR_TWI;
} }
#ifdef CONFIG_ARM64_TWED
/* Enable delayed WFE trapping for this vcpu (HCR_EL2.TWEDEn). */
static inline void vcpu_twed_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWEDEN;
}

/* Disable delayed WFE trapping for this vcpu. */
static inline void vcpu_twed_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWEDEN;
}

/*
 * Program HCR_EL2.TWEDEL from the "twedel" module parameter, clamped to
 * the 4-bit field; the hardware delay is 2^(TWEDEL + 8) cycles.
 */
static inline void vcpu_set_twed(struct kvm_vcpu *vcpu)
{
	u64 delay = (u64)twedel;

	if (delay > HCR_TWEDEL_MAX)
		delay = HCR_TWEDEL_MAX;

	vcpu->arch.hcr_el2 &= ~HCR_TWEDEL_MASK;
	vcpu->arch.hcr_el2 |= (delay << HCR_TWEDEL_SHIFT);
}
#else
/* !CONFIG_ARM64_TWED: no-op stubs (no ';' after the body — it would be a
 * stray file-scope semicolon, rejected by -Wpedantic and kernel style). */
static inline void vcpu_twed_enable(struct kvm_vcpu *vcpu) {}
static inline void vcpu_twed_disable(struct kvm_vcpu *vcpu) {}
static inline void vcpu_set_twed(struct kvm_vcpu *vcpu) {}
#endif
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu) static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{ {
vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
......
...@@ -699,4 +699,12 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); ...@@ -699,4 +699,12 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \ #define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
#ifdef CONFIG_ARM64_TWED
/* TWED is used only when the CPU implements it AND the admin enabled it. */
#define use_twed() (has_twed() && twed_enable)
/* Module parameters defined in arch/arm64/kvm (see twed_enable/twedel). */
extern bool twed_enable;
extern unsigned int twedel;
#else
#define use_twed() (false)
#endif
#endif /* __ARM64_KVM_HOST_H__ */ #endif /* __ARM64_KVM_HOST_H__ */
...@@ -97,6 +97,11 @@ static __always_inline bool has_vhe(void) ...@@ -97,6 +97,11 @@ static __always_inline bool has_vhe(void)
return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN); return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
} }
/* Report whether this system implements the TWED (delayed WFE trap) feature. */
static __always_inline bool has_twed(void)
{
	bool twed_supported = cpus_have_const_cap(ARM64_HAS_TWED);

	return twed_supported;
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */ #endif /* ! __ASM__VIRT_H */
...@@ -61,6 +61,14 @@ static bool vgic_present; ...@@ -61,6 +61,14 @@ static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
#ifdef CONFIG_ARM64_TWED
/*
 * Runtime-writable knobs for delayed WFE trapping:
 * twed_enable - turn the feature on/off (takes effect on next vcpu run)
 * twedel      - delay exponent, clamped to HCR_TWEDEL_MAX when applied
 *
 * checkpatch prefers octal permissions over symbolic ones;
 * 0644 == S_IRUGO | S_IWUSR.
 */
bool twed_enable = false;
module_param(twed_enable, bool, 0644);

unsigned int twedel = 0;
module_param(twedel, uint, 0644);
#endif
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{ {
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
...@@ -818,6 +826,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -818,6 +826,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_arm_setup_debug(vcpu); kvm_arm_setup_debug(vcpu);
if (use_twed()) {
vcpu_twed_enable(vcpu);
vcpu_set_twed(vcpu);
} else {
vcpu_twed_disable(vcpu);
}
/************************************************************** /**************************************************************
* Enter the guest * Enter the guest
*/ */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册