Commit cfb92fa0 authored by Mark Brown, committed by Wang ShaoBo

arm64/sme: Implement support for TPIDR2

mainline inclusion
from mainline-v5.19-rc1
commit a9d69158
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=a9d69158595017d260ab37bf88b8f125e5e8144c

-------------------------------------------------

The Scalable Matrix Extension introduces support for a new thread specific
data register TPIDR2 intended for use by libc. The kernel must save the
value of TPIDR2 on context switch and should ensure that all new threads
start off with a default value of 0. Add a field to the thread_struct to
store TPIDR2 and context switch it with the other thread specific data.

In case there are future extensions which also use TPIDR2 we introduce
system_supports_tpidr2() and use that rather than system_supports_sme()
for TPIDR2 handling.
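
As an illustrative aside (not part of this patch): once the kernel sets SCTLR_EL1.EnTP2, EL0 code can read and write TPIDR2 directly. A minimal user-space sketch, assuming the HWCAP2_SME hwcap bit from <asm/hwcap.h> and using the raw S3_3_C13_C0_5 system-register encoding so it assembles even on toolchains that predate the tpidr2_el0 register name:

#include <stdint.h>
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SME
#define HWCAP2_SME	(1 << 23)	/* assumed value, from <asm/hwcap.h> */
#endif

/* TPIDR2_EL0 encodes as op0=3, op1=3, CRn=13, CRm=0, op2=5 */
static inline uint64_t tpidr2_read(void)
{
	uint64_t val;

	asm volatile("mrs %0, S3_3_C13_C0_5" : "=r" (val));
	return val;
}

static inline void tpidr2_write(uint64_t val)
{
	asm volatile("msr S3_3_C13_C0_5, %0" :: "r" (val));
}

int main(void)
{
	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) {
		puts("SME not supported; TPIDR2 would trap at EL0");
		return 0;
	}

	/* Per this patch, new threads start off with TPIDR2 == 0 */
	printf("TPIDR2 = %#llx\n", (unsigned long long)tpidr2_read());
	tpidr2_write(0x1234);
	printf("TPIDR2 = %#llx\n", (unsigned long long)tpidr2_read());
	return 0;
}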
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-13-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Parent 96b732c5
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -737,6 +737,11 @@ static __always_inline bool system_supports_fa64(void)
 		cpus_have_const_cap(ARM64_SME_FA64);
 }
 
+static __always_inline bool system_supports_tpidr2(void)
+{
+	return system_supports_sme();
+}
+
 static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -166,7 +166,7 @@ struct thread_struct {
 #endif
 	KABI_USE(1, unsigned int vl[ARM64_VEC_MAX])
 	KABI_USE(2, unsigned int vl_onexec[ARM64_VEC_MAX])
-	KABI_RESERVE(3)
+	KABI_USE(3, u64 tpidr2_el0)
 	KABI_RESERVE(4)
 	KABI_RESERVE(5)
 	KABI_RESERVE(6)
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1086,6 +1086,10 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
 	/* Allow SME in kernel */
 	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
 	isb();
+
+	/* Allow EL0 to access TPIDR2 */
+	write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
+	isb();
 }
 
 /*
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -320,6 +320,9 @@ void show_regs(struct pt_regs * regs)
 static void tls_thread_flush(void)
 {
 	write_sysreg(0, tpidr_el0);
 
+	if (system_supports_tpidr2())
+		write_sysreg_s(0, SYS_TPIDR2_EL0);
+
 	if (is_a32_compat_task()) {
 		current->thread.uw.tp_value = 0;
@@ -414,6 +417,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		 * out-of-sync with the saved value.
 		 */
 		*task_user_tls(p) = read_sysreg(tpidr_el0);
+		if (system_supports_tpidr2())
+			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 
 		if (stack_start) {
 			if (is_a32_compat_thread(task_thread_info(p)))
@@ -424,10 +429,12 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		/*
 		 * If a TLS pointer was passed to clone, use it for the new
-		 * thread.
+		 * thread. We also reset TPIDR2 if it's in use.
 		 */
-		if (clone_flags & CLONE_SETTLS)
+		if (clone_flags & CLONE_SETTLS) {
 			p->thread.uw.tp_value = tls;
+			p->thread.tpidr2_el0 = 0;
+		}
 	} else {
 		/*
 		 * A kthread has no context to ERET to, so ensure any buggy
@@ -453,6 +460,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 void tls_preserve_current_state(void)
 {
 	*task_user_tls(current) = read_sysreg(tpidr_el0);
+	if (system_supports_tpidr2() && !is_compat_task())
+		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 }
 
 static void tls_thread_switch(struct task_struct *next)
@@ -465,6 +474,8 @@ static void tls_thread_switch(struct task_struct *next)
 	write_sysreg(0, tpidrro_el0);
 
 	write_sysreg(*task_user_tls(next), tpidr_el0);
+	if (system_supports_tpidr2())
+		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
 }
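
The resulting clone() semantics can be illustrated with a hypothetical test (not from this series; it assumes SME hardware, otherwise the TPIDR2 accesses trap): pthread_create() passes CLONE_SETTLS, so the new thread starts with TPIDR2 == 0, while a plain fork() lets the child inherit the parent's value.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static inline uint64_t tpidr2_read(void)
{
	uint64_t val;

	asm volatile("mrs %0, S3_3_C13_C0_5" : "=r" (val));
	return val;
}

static inline void tpidr2_write(uint64_t val)
{
	asm volatile("msr S3_3_C13_C0_5, %0" :: "r" (val));
}

static void *thread_fn(void *unused)
{
	/* pthread_create() uses CLONE_SETTLS: expect 0 here */
	printf("new thread:   TPIDR2 = %#llx\n",
	       (unsigned long long)tpidr2_read());
	return NULL;
}

int main(void)
{
	pthread_t t;

	tpidr2_write(0xdead);

	pthread_create(&t, NULL, thread_fn, NULL);
	pthread_join(t, NULL);

	if (fork() == 0) {
		/* fork() does not pass CLONE_SETTLS: expect 0xdead */
		printf("forked child: TPIDR2 = %#llx\n",
		       (unsigned long long)tpidr2_read());
		return 0;
	}
	wait(NULL);
	return 0;
}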