提交 1c53a1ae 编写于 作者: M Marc Zyngier

Merge branch kvm-arm64/misc-5.17 into kvmarm-master/next

* kvm-arm64/misc-5.17:
  : .
  : Misc fixes and improvements:
  : - Add minimal support for ARMv8.7's PMU extension
  : - Constify kvm_io_gic_ops
  : - Drop kvm_is_transparent_hugepage() prototype
  : - Drop unused workaround_flags field
  : - Rework kvm_pgtable initialisation
  : - Documentation fixes
  : - Replace open-coded SCTLR_EL1.EE usage with its defined macro
  : - Sysreg list selftest update to handle PAuth
  : - Include cleanups
  : .
  KVM: arm64: vgic: Replace kernel.h with the necessary inclusions
  KVM: arm64: Fix comment typo in kvm_vcpu_finalize_sve()
  KVM: arm64: selftests: get-reg-list: Add pauth configuration
  KVM: arm64: Fix comment on barrier in kvm_psci_vcpu_on()
  KVM: arm64: Fix comment for kvm_reset_vcpu()
  KVM: arm64: Use defined value for SCTLR_ELx_EE
  KVM: arm64: Rework kvm_pgtable initialisation
  KVM: arm64: Drop unused workaround_flags vcpu field
Signed-off-by: Marc Zyngier <maz@kernel.org>
...@@ -386,7 +386,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) ...@@ -386,7 +386,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else { } else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25); sctlr |= SCTLR_ELx_EE;
vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1); vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
} }
} }
......
...@@ -297,9 +297,6 @@ struct kvm_vcpu_arch { ...@@ -297,9 +297,6 @@ struct kvm_vcpu_arch {
/* Exception Information */ /* Exception Information */
struct kvm_vcpu_fault_info fault; struct kvm_vcpu_fault_info fault;
/* State of various workarounds, see kvm_asm.h for bit assignment */
u64 workaround_flags;
/* Miscellaneous vcpu state flags */ /* Miscellaneous vcpu state flags */
u64 flags; u64 flags;
......
...@@ -291,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); ...@@ -291,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
/** /**
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise. * @pgt: Uninitialised page-table structure to initialise.
* @arch: Arch-specific KVM structure representing the guest virtual * @mmu: S2 MMU context for this S2 translation
* machine.
* @mm_ops: Memory management callbacks. * @mm_ops: Memory management callbacks.
* @flags: Stage-2 configuration flags. * @flags: Stage-2 configuration flags.
* @force_pte_cb: Function that returns true if page level mappings must * @force_pte_cb: Function that returns true if page level mappings must
...@@ -300,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); ...@@ -300,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
* *
* Return: 0 on success, negative error code on failure. * Return: 0 on success, negative error code on failure.
*/ */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops, struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags, enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb); kvm_pgtable_force_pte_cb_t force_pte_cb);
#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \ #define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
__kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL) __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
/** /**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table. * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
......
...@@ -111,7 +111,6 @@ int main(void) ...@@ -111,7 +111,6 @@ int main(void)
#ifdef CONFIG_KVM #ifdef CONFIG_KVM
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1])); DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
......
...@@ -124,19 +124,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base) ...@@ -124,19 +124,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
prepare_host_vtcr(); prepare_host_vtcr();
hyp_spin_lock_init(&host_kvm.lock); hyp_spin_lock_init(&host_kvm.lock);
mmu->arch = &host_kvm.arch;
ret = prepare_s2_pool(pgt_pool_base); ret = prepare_s2_pool(pgt_pool_base);
if (ret) if (ret)
return ret; return ret;
ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch, ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
&host_kvm.mm_ops, KVM_HOST_S2_FLAGS, &host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
host_stage2_force_pte_cb); host_stage2_force_pte_cb);
if (ret) if (ret)
return ret; return ret;
mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
mmu->arch = &host_kvm.arch;
mmu->pgt = &host_kvm.pgt; mmu->pgt = &host_kvm.pgt;
WRITE_ONCE(mmu->vmid.vmid_gen, 0); WRITE_ONCE(mmu->vmid.vmid_gen, 0);
WRITE_ONCE(mmu->vmid.vmid, 0); WRITE_ONCE(mmu->vmid.vmid, 0);
......
...@@ -1178,13 +1178,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) ...@@ -1178,13 +1178,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
} }
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops, struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags, enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb) kvm_pgtable_force_pte_cb_t force_pte_cb)
{ {
size_t pgd_sz; size_t pgd_sz;
u64 vtcr = arch->vtcr; u64 vtcr = mmu->arch->vtcr;
u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 ia_bits = VTCR_EL2_IPA(vtcr);
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
...@@ -1197,7 +1197,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, ...@@ -1197,7 +1197,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
pgt->ia_bits = ia_bits; pgt->ia_bits = ia_bits;
pgt->start_level = start_level; pgt->start_level = start_level;
pgt->mm_ops = mm_ops; pgt->mm_ops = mm_ops;
pgt->mmu = &arch->mmu; pgt->mmu = mmu;
pgt->flags = flags; pgt->flags = flags;
pgt->force_pte_cb = force_pte_cb; pgt->force_pte_cb = force_pte_cb;
......
...@@ -637,7 +637,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) ...@@ -637,7 +637,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
if (!pgt) if (!pgt)
return -ENOMEM; return -ENOMEM;
err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); mmu->arch = &kvm->arch;
err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
if (err) if (err)
goto out_free_pgtable; goto out_free_pgtable;
...@@ -650,7 +651,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) ...@@ -650,7 +651,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
mmu->arch = &kvm->arch;
mmu->pgt = pgt; mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd); mmu->pgd_phys = __pa(pgt->pgd);
WRITE_ONCE(mmu->vmid.vmid_gen, 0); WRITE_ONCE(mmu->vmid.vmid_gen, 0);
......
...@@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) ...@@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
/* /*
* Make sure the reset request is observed if the change to * Make sure the reset request is observed if the change to
* power_state is observed. * power_off is observed.
*/ */
smp_wmb(); smp_wmb();
......
...@@ -101,7 +101,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) ...@@ -101,7 +101,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
/* /*
* Responsibility for these properties is shared between * Responsibility for these properties is shared between
* kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
* set_sve_vls(). Double-check here just to be sure: * set_sve_vls(). Double-check here just to be sure:
*/ */
if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() || if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
...@@ -208,10 +208,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu) ...@@ -208,10 +208,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
* kvm_reset_vcpu - sets core registers and sys_regs to reset value * kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer * @vcpu: The VCPU pointer
* *
* This function finds the right table above and sets the registers on * This function sets the registers on the virtual CPU struct to their
* the virtual CPU struct to their architecturally defined reset * architecturally defined reset values, except for registers whose reset is
* values, except for registers whose reset is deferred until * deferred until kvm_arm_vcpu_finalize().
* kvm_arm_vcpu_finalize().
* *
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
* ioctl or as part of handling a request issued by another VCPU in the PSCI * ioctl or as part of handling a request issued by another VCPU in the PSCI
......
...@@ -5,9 +5,11 @@ ...@@ -5,9 +5,11 @@
#ifndef __KVM_ARM_VGIC_H #ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H #define __KVM_ARM_VGIC_H
#include <linux/kernel.h> #include <linux/bits.h>
#include <linux/kvm.h> #include <linux/kvm.h>
#include <linux/irqreturn.h> #include <linux/irqreturn.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/static_key.h> #include <linux/static_key.h>
#include <linux/types.h> #include <linux/types.h>
......
...@@ -1014,6 +1014,22 @@ static __u64 sve_rejects_set[] = { ...@@ -1014,6 +1014,22 @@ static __u64 sve_rejects_set[] = {
KVM_REG_ARM64_SVE_VLS, KVM_REG_ARM64_SVE_VLS,
}; };
static __u64 pauth_addr_regs[] = {
ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */
};
static __u64 pauth_generic_regs[] = {
ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};
#define BASE_SUBLIST \ #define BASE_SUBLIST \
{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), } { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \ #define VREGS_SUBLIST \
...@@ -1025,6 +1041,21 @@ static __u64 sve_rejects_set[] = { ...@@ -1025,6 +1041,21 @@ static __u64 sve_rejects_set[] = {
{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \ { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
.regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \ .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
.rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), } .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
{ \
.name = "pauth_address", \
.capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
.feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
.regs = pauth_addr_regs, \
.regs_n = ARRAY_SIZE(pauth_addr_regs), \
}, \
{ \
.name = "pauth_generic", \
.capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
.feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
.regs = pauth_generic_regs, \
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}
static struct vcpu_config vregs_config = { static struct vcpu_config vregs_config = {
.sublists = { .sublists = {
...@@ -1056,11 +1087,30 @@ static struct vcpu_config sve_pmu_config = { ...@@ -1056,11 +1087,30 @@ static struct vcpu_config sve_pmu_config = {
{0}, {0},
}, },
}; };
static struct vcpu_config pauth_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
{0},
},
};
static struct vcpu_config pauth_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
PMU_SUBLIST,
{0},
},
};
static struct vcpu_config *vcpu_configs[] = { static struct vcpu_config *vcpu_configs[] = {
&vregs_config, &vregs_config,
&vregs_pmu_config, &vregs_pmu_config,
&sve_config, &sve_config,
&sve_pmu_config, &sve_pmu_config,
&pauth_config,
&pauth_pmu_config,
}; };
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册