diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index aa11dbec0d70fb1d28c5b578899871d3dba77407..9bb1873f529515a9ec9e1ee487a4e9e8a02e1f3b 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -67,12 +67,12 @@ extern void fpsimd_save_and_flush_cpu_state(void);
 
 static inline bool thread_sm_enabled(struct thread_struct *thread)
 {
-	return system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK);
+	return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
 }
 
 static inline bool thread_za_enabled(struct thread_struct *thread)
 {
-	return system_supports_sme() && (thread->svcr & SVCR_EL0_ZA_MASK);
+	return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
 }
 
 /* Maximum VL that SVE/SME VL-agnostic software can transparently support */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 69ce163d2fb28f1137c807039fb3fe61740bde2a..8de5a4fc06e3507ad7b8e6dc2e1b3c5cba1d22a8 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -192,7 +192,7 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
 
 static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
 {
-	if (system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK))
+	if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
 		return thread_get_sme_vl(thread);
 	else
 		return thread_get_sve_vl(thread);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 4459cd4a37f5f30bee8cbb8f23737bece518886a..a2f0759f65b2706d5ba41e0c5fe7df024a60ece8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -479,9 +479,9 @@
 #define SYS_RNDR_EL0			sys_reg(3, 3, 2, 4, 0)
 #define SYS_RNDRRS_EL0			sys_reg(3, 3, 2, 4, 1)
 
-#define SYS_SVCR_EL0			sys_reg(3, 3, 4, 2, 2)
-#define SVCR_EL0_ZA_MASK		2
-#define SVCR_EL0_SM_MASK		1
+#define SYS_SVCR			sys_reg(3, 3, 4, 2, 2)
+#define SVCR_ZA_MASK			2
+#define SVCR_SM_MASK			1
 
 #define SYS_PMCR_EL0			sys_reg(3, 3, 9, 12, 0)
 #define SYS_PMCNTENSET_EL0		sys_reg(3, 3, 9, 12, 1)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index a5f6d6d9f372e80c0ec7fb2eafaef03b04e29a20..759d40cac1fe8edaa5f547605e9ada4a76d39d5d 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -410,7 +410,7 @@ static void task_fpsimd_load(void)
 		if (test_thread_flag(TIF_SME))
 			sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
 
-		write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
+		write_sysreg_s(current->thread.svcr, SYS_SVCR);
 
 		if (thread_za_enabled(&current->thread))
 			za_load_state(current->thread.za_state);
@@ -462,15 +462,15 @@ static void fpsimd_save(void)
 
 	if (system_supports_sme()) {
 		u64 *svcr = last->svcr;
-		*svcr = read_sysreg_s(SYS_SVCR_EL0);
+		*svcr = read_sysreg_s(SYS_SVCR);
 
-		*svcr = read_sysreg_s(SYS_SVCR_EL0);
+		*svcr = read_sysreg_s(SYS_SVCR);
 
-		if (*svcr & SYS_SVCR_EL0_ZA_MASK)
+		if (*svcr & SVCR_ZA_MASK)
 			za_save_state(last->za_state);
 
 		/* If we are in streaming mode override regular SVE. */
-		if (*svcr & SYS_SVCR_EL0_SM_MASK) {
+		if (*svcr & SVCR_SM_MASK) {
 			save_sve_regs = true;
 			save_ffr = system_supports_fa64();
 			vl = last->sme_vl;
@@ -852,8 +852,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
 		sve_to_fpsimd(task);
 
 	if (system_supports_sme() && type == ARM64_VEC_SME) {
-		task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
-				       SYS_SVCR_EL0_ZA_MASK);
+		task->thread.svcr &= ~(SVCR_SM_MASK |
+				       SVCR_ZA_MASK);
 		clear_thread_flag(TIF_SME);
 	}
 
@@ -1915,10 +1915,10 @@ void __efi_fpsimd_begin(void)
 		__this_cpu_write(efi_sve_state_used, true);
 
 		if (system_supports_sme()) {
-			svcr = read_sysreg_s(SYS_SVCR_EL0);
+			svcr = read_sysreg_s(SYS_SVCR);
 
 			if (!system_supports_fa64())
-				ffr = svcr & SVCR_EL0_SM_MASK;
+				ffr = svcr & SVCR_SM_MASK;
 
 			__this_cpu_write(efi_sm_state, ffr);
 		}
@@ -1928,8 +1928,8 @@ void __efi_fpsimd_begin(void)
 			       ffr);
 
 		if (system_supports_sme())
-			sysreg_clear_set_s(SYS_SVCR_EL0,
-					   SVCR_EL0_SM_MASK, 0);
+			sysreg_clear_set_s(SYS_SVCR,
+					   SVCR_SM_MASK, 0);
 
 	} else {
 		fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
@@ -1962,9 +1962,9 @@ void __efi_fpsimd_end(void)
 		 */
 		if (system_supports_sme()) {
 			if (__this_cpu_read(efi_sm_state)) {
-				sysreg_clear_set_s(SYS_SVCR_EL0,
+				sysreg_clear_set_s(SYS_SVCR,
 						   0,
-						   SVCR_EL0_SM_MASK);
+						   SVCR_SM_MASK);
 				if (!system_supports_fa64())
 					ffr = efi_sm_state;
 			}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 60ebc3060cf1dd1907fb0580ed6069a1b6793385..21da83187a602c499e4240d538d28da3bed5624d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -867,10 +867,10 @@ static int sve_set_common(struct task_struct *target,
 
 		switch (type) {
 		case ARM64_VEC_SVE:
-			target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
+			target->thread.svcr &= ~SVCR_SM_MASK;
 			break;
 		case ARM64_VEC_SME:
-			target->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+			target->thread.svcr |= SVCR_SM_MASK;
 			break;
 		default:
 			WARN_ON_ONCE(1);
@@ -1100,7 +1100,7 @@ static int za_set(struct task_struct *target,
 
 	/* If there is no data then disable ZA */
 	if (!count) {
-		target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+		target->thread.svcr &= ~SVCR_ZA_MASK;
 		goto out;
 	}
 
@@ -1125,7 +1125,7 @@ static int za_set(struct task_struct *target,
 
 	/* Mark ZA as active and let userspace use it */
 	set_tsk_thread_flag(target, TIF_SME);
-	target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
+	target->thread.svcr |= SVCR_ZA_MASK;
 
 out:
 	fpsimd_flush_task_state(target);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 2295948d97fda7d63d5d0a33a027e85e03288a42..18bf590dc1c72f14261909c9b9f4efbca50807f2 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -288,7 +288,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 
 	if (sve.head.size <= sizeof(*user->sve)) {
 		clear_thread_flag(TIF_SVE);
-		current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
+		current->thread.svcr &= ~SVCR_SM_MASK;
 		goto fpsimd_only;
 	}
 
@@ -321,7 +321,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 		return -EFAULT;
 
 	if (sve.flags & SVE_SIG_FLAG_SM)
-		current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+		current->thread.svcr |= SVCR_SM_MASK;
 	else
 		set_thread_flag(TIF_SVE);
 
@@ -398,7 +398,7 @@ static int restore_za_context(struct user_ctxs __user *user)
 		return -EINVAL;
 
 	if (za.head.size <= sizeof(*user->za)) {
-		current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+		current->thread.svcr &= ~SVCR_ZA_MASK;
 		return 0;
 	}
 
@@ -419,7 +419,7 @@ static int restore_za_context(struct user_ctxs __user *user)
 
 	sme_alloc(current);
 	if (!current->thread.za_state) {
-		current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+		current->thread.svcr &= ~SVCR_ZA_MASK;
 		clear_thread_flag(TIF_SME);
 		return -ENOMEM;
 	}
@@ -432,7 +432,7 @@ static int restore_za_context(struct user_ctxs __user *user)
 		return -EFAULT;
 
 	set_thread_flag(TIF_SME);
-	current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
+	current->thread.svcr |= SVCR_ZA_MASK;
 
 	return 0;
 }
@@ -922,8 +922,8 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 
 	/* Signal handlers are invoked with ZA and streaming mode disabled */
 	if (system_supports_sme()) {
-		current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK |
-					  SYS_SVCR_EL0_SM_MASK);
+		current->thread.svcr &= ~(SVCR_ZA_MASK |
+					  SVCR_SM_MASK);
 		sme_smstop();
 	}
 
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 92c69e5ac269838877c9c32c80950bcdab2271b7..733451fe7e41f031e2b5cf34396521d8ec0a0639 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -174,9 +174,9 @@ static inline void fp_user_discard(void)
 	 * need updating.
 	 */
 	if (system_supports_sme() && test_thread_flag(TIF_SME)) {
-		u64 svcr = read_sysreg_s(SYS_SVCR_EL0);
+		u64 svcr = read_sysreg_s(SYS_SVCR);
 
-		if (svcr & SYS_SVCR_EL0_SM_MASK)
+		if (svcr & SVCR_SM_MASK)
 			sme_smstop_sm();
 	}
 
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 441edb9c398c95a089ddc7c777f6bc4001e8e7f8..3d251a4d2cf7bfdb3ed82df94d30b8d6b48a458c 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -96,8 +96,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
 			vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
 
-		if (read_sysreg_s(SYS_SVCR_EL0) &
-		    (SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK)) {
+		if (read_sysreg_s(SYS_SVCR) &
+		    (SVCR_SM_MASK | SVCR_ZA_MASK)) {
 			vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 			fpsimd_save_and_flush_cpu_state();
 		}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 689e53dd4cb17ade236dfb1c0ca9995804759b8b..1cf01c022b30ad31ada00844cb87dea435243c4e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1685,7 +1685,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
-	{ SYS_DESC(SYS_SVCR_EL0), undef_access },
+	{ SYS_DESC(SYS_SVCR), undef_access },
 
 	{ PMU_SYS_REG(SYS_PMCR_EL0),
 	  .access = access_pmcr, .reset = reset_pmcr, .reg = PMCR_EL0 },
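
For reviewers, a minimal sketch (not part of the patch; the helper name is hypothetical) of the usage pattern the hunks above converge on after the rename: read SVCR once via SYS_SVCR, then test the SVCR_SM_MASK / SVCR_ZA_MASK bits, as fpsimd_save() does.

/*
 * Illustrative only: mirrors the pattern used in fpsimd_save() above with
 * the renamed defines from asm/sysreg.h. example_sme_state_live() is a
 * hypothetical helper, not an API added by this patch.
 */
static bool example_sme_state_live(void)
{
	u64 svcr;

	if (!system_supports_sme())
		return false;

	svcr = read_sysreg_s(SYS_SVCR);

	/* True if streaming SVE state and/or ZA storage is live. */
	return svcr & (SVCR_SM_MASK | SVCR_ZA_MASK);
}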