提交 4028c7ba 编写于 作者: M Mark Brown 提交者: Wang ShaoBo

arm64/sme: Implement SVCR context switching

mainline inclusion
from mainline-v5.19-rc1
commit b40c559b
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=b40c559b45bec736f588c57dd5be967fe573058b

-------------------------------------------------

In SME the use of both streaming SVE mode and ZA are tracked through
PSTATE.SM and PSTATE.ZA, visible through the system register SVCR.  In
order to context switch the floating point state for SME we need to
context switch the contents of this register as part of context
switching the floating point state.

Since changing the vector length exits streaming SVE mode and disables
ZA we also make sure we update SVCR appropriately when setting vector
length, and similarly ensure that new threads have streaming SVE mode
and ZA disabled.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-14-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
上级 cfb92fa0
......@@ -47,7 +47,8 @@ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_task_to_cpu(void);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
void *sve_state, unsigned int sve_vl);
void *sve_state, unsigned int sve_vl,
u64 *svcr);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
......
......@@ -167,7 +167,7 @@ struct thread_struct {
KABI_USE(1, unsigned int vl[ARM64_VEC_MAX])
KABI_USE(2, unsigned int vl_onexec[ARM64_VEC_MAX])
KABI_USE(3, u64 tpidr2_el0)
KABI_RESERVE(4)
KABI_USE(4, u64 svcr)
KABI_RESERVE(5)
KABI_RESERVE(6)
KABI_RESERVE(7)
......
......@@ -86,6 +86,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
#define TIF_32BIT_AARCH64 27 /* 32 bit process on AArch64(ILP32) */
#define TIF_PATCH_PENDING 28 /* pending live patching update */
#define TIF_SME 29 /* SME in use */
#define TIF_SME_VL_INHERIT 30 /* Inherit SME vl_onexec across exec */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
......
......@@ -117,6 +117,7 @@
/*
 * Per-CPU record of the FP/SIMD context most recently bound to this CPU,
 * used to decide whether state must be reloaded on context switch.
 */
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st; /* FPSIMD register image last bound to this CPU */
void *sve_state; /* backing store for the SVE registers of the bound context */
u64 *svcr; /* where to save SVCR (PSTATE.SM/ZA) for the bound context; NULL for KVM guests, which don't support SME yet */
unsigned int sve_vl; /* SVE vector length of the bound context */
};
......@@ -353,6 +354,9 @@ static void task_fpsimd_load(void)
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (IS_ENABLED(CONFIG_ARM64_SME) && test_thread_flag(TIF_SME))
write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
sve_load_state(sve_pffr(&current->thread),
......@@ -378,6 +382,12 @@ static void fpsimd_save(void)
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
if (IS_ENABLED(CONFIG_ARM64_SME) &&
test_thread_flag(TIF_SME)) {
u64 *svcr = last->svcr;
*svcr = read_sysreg_s(SYS_SVCR_EL0);
}
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
test_thread_flag(TIF_SVE)) {
if (WARN_ON(sve_get_vl() != last->sve_vl)) {
......@@ -729,6 +739,10 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
sve_to_fpsimd(task);
if (system_supports_sme() && type == ARM64_VEC_SME)
task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
SYS_SVCR_EL0_ZA_MASK);
if (task == current)
put_cpu_fpsimd_context();
......@@ -1392,6 +1406,7 @@ void fpsimd_bind_task_to_cpu(void)
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
last->sve_vl = task_get_sve_vl(current);
last->svcr = &current->thread.svcr;
current->thread.fpsimd_cpu = smp_processor_id();
if (system_supports_sve()) {
......@@ -1406,7 +1421,7 @@ void fpsimd_bind_task_to_cpu(void)
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
unsigned int sve_vl)
unsigned int sve_vl, u64 *svcr)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
......@@ -1415,6 +1430,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
last->svcr = svcr;
last->sve_state = sve_state;
last->sve_vl = sve_vl;
}
......
......@@ -382,6 +382,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
dst->thread.svcr = 0;
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
......
......@@ -97,9 +97,14 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
/*
* Currently we do not support SME guests so SVCR is
* always 0 and we just need a variable to point to.
*/
fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
vcpu->arch.sve_state,
vcpu->arch.sve_max_vl);
vcpu->arch.sve_max_vl,
NULL);
clear_thread_flag(TIF_FOREIGN_FPSTATE);
update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册