Commit cb4efa60 authored by Mark Brown, committed by Wang ShaoBo

arm64/sve: Track vector lengths for tasks in an array

mainline inclusion
from mainline-v5.16-rc1
commit 5838a155
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=5838a155798479e3fe7e1482a31f0db657d5bbdd

-------------------------------------------------

As with SVE, we will track a per-task SME vector length. Convert the
existing storage for the vector length into an array and update
fpsimd_flush_thread() to initialise it in a helper function.
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20211019172247.3045838-10-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Parent aaba3535
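Before diving into the diff: the idea, in a simplified sketch, is to replace the two SVE-specific vector length fields in thread_struct with small arrays indexed by vector type, so that a later SME patch only needs to add another enumerator. The enum vec_type below comes from an earlier patch in this series; the exact field placement in this backport goes through the KABI reserve slots shown in the first hunk.

/* Simplified sketch only -- see the diff below for the real change. */
enum vec_type {
	ARM64_VEC_SVE = 0,
	ARM64_VEC_MAX,
};

struct thread_struct {
	/* ... */
	/* was: unsigned int sve_vl, sve_vl_onexec; */
	unsigned int vl[ARM64_VEC_MAX];		/* current vector length per type */
	unsigned int vl_onexec[ARM64_VEC_MAX];	/* vector length to use after next exec */
	/* ... */
};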
@@ -163,8 +163,8 @@ struct thread_struct {
 	u64			sctlr_tcf0;
 	u64			gcr_user_incl;
 #endif
-	KABI_RESERVE(1)
-	KABI_RESERVE(2)
+	KABI_USE(1, unsigned int vl[ARM64_VEC_MAX])
+	KABI_USE(2, unsigned int vl_onexec[ARM64_VEC_MAX])
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
 	KABI_RESERVE(5)
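This first hunk repurposes two of the structure's kABI reserve slots rather than adding new fields, so the size and layout of thread_struct seen by existing modules are preserved. As a rough illustration of the technique only (not the actual openEuler <linux/kabi.h> definitions), the helpers behave roughly like this:

/* Illustration only -- the real openEuler macros differ in detail and
 * also enforce that the new member fits within the reserved slot. */
#define KABI_RESERVE(n)		u64 kabi_reserved##n;
#define KABI_USE(n, _new)					\
	union {							\
		_new;						\
		u64 kabi_reserved##n;				\
	};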
@@ -173,15 +173,45 @@ struct thread_struct {
 	KABI_RESERVE(8)
 };
 
+static inline unsigned int thread_get_vl(struct thread_struct *thread,
+					 enum vec_type type)
+{
+	return thread->vl[type];
+}
+
 static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
 {
-	return thread->sve_vl;
+	return thread_get_vl(thread, ARM64_VEC_SVE);
 }
 
-unsigned int task_get_sve_vl(const struct task_struct *task);
-void task_set_sve_vl(struct task_struct *task, unsigned long vl);
-unsigned int task_get_sve_vl_onexec(const struct task_struct *task);
-void task_set_sve_vl_onexec(struct task_struct *task, unsigned long vl);
+unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
+void task_set_vl(struct task_struct *task, enum vec_type type,
+		 unsigned long vl);
+void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
+			unsigned long vl);
+unsigned int task_get_vl_onexec(const struct task_struct *task,
+				enum vec_type type);
+
+static inline unsigned int task_get_sve_vl(const struct task_struct *task)
+{
+	return task_get_vl(task, ARM64_VEC_SVE);
+}
+
+static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
+{
+	task_set_vl(task, ARM64_VEC_SVE, vl);
+}
+
+static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
+{
+	return task_get_vl_onexec(task, ARM64_VEC_SVE);
+}
+
+static inline void task_set_sve_vl_onexec(struct task_struct *task,
+					  unsigned long vl)
+{
+	task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
+}
+
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
 						unsigned long *size)
......
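The net effect of the processor.h changes above is a two-level accessor API: the generic helpers take an explicit enum vec_type, while the familiar SVE-specific names survive as inline wrappers, so existing callers keep working unchanged. A usage sketch (these call sites are illustrative and not part of this patch):

/* Illustrative only -- not call sites from this patch. */
static void report_vl(struct task_struct *tsk, enum vec_type type)
{
	/* Generic accessors, indexed by vector type: */
	pr_info("vl=%u (onexec=%u)\n",
		task_get_vl(tsk, type), task_get_vl_onexec(tsk, type));
}

static void report_sve_vl(struct task_struct *tsk)
{
	/* Existing SVE callers keep the old name; the inline wrapper
	 * just forwards to task_get_vl(tsk, ARM64_VEC_SVE). */
	pr_info("SVE vl=%u\n", task_get_sve_vl(tsk));
}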
@@ -81,7 +81,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* AARCH32 process */
 #define TIF_SVE			23	/* Scalable Vector Extension in use */
-#define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
+#define TIF_SVE_VL_INHERIT	24	/* Inherit SVE vl_onexec across exec */
 #define TIF_SSBD		25	/* Wants SSB mitigation */
 #define TIF_TAGGED_ADDR		26	/* Allow tagged user addresses */
 #define TIF_32BIT_AARCH64	27	/* 32 bit process on AArch64(ILP32) */
......
@@ -133,6 +133,17 @@ __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
 #endif
 };
 
+static unsigned int vec_vl_inherit_flag(enum vec_type type)
+{
+	switch (type) {
+	case ARM64_VEC_SVE:
+		return TIF_SVE_VL_INHERIT;
+	default:
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+}
+
 struct vl_config {
 	int __default_vl;		/* Default VL for tasks */
 };
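vec_vl_inherit_flag() maps a vector type onto the thread flag that controls whether the on-exec vector length is inherited across exec. Only SVE exists today, hence the WARN_ON_ONCE() default; the point is to give later vector extensions a single place to hook in. A hypothetical sketch of how a second type would be added (the SME names below are placeholders, not part of this patch):

/* Hypothetical extension, for illustration only. */
static unsigned int vec_vl_inherit_flag(enum vec_type type)
{
	switch (type) {
	case ARM64_VEC_SVE:
		return TIF_SVE_VL_INHERIT;
	case ARM64_VEC_SME:			/* placeholder type */
		return TIF_SME_VL_INHERIT;	/* placeholder flag */
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}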
@@ -237,24 +248,27 @@ static void sve_free(struct task_struct *task)
 	__sve_free(task);
 }
 
-unsigned int task_get_sve_vl(const struct task_struct *task)
+unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
 {
-	return task->thread.sve_vl;
+	return task->thread.vl[type];
 }
 
-void task_set_sve_vl(struct task_struct *task, unsigned long vl)
+void task_set_vl(struct task_struct *task, enum vec_type type,
+		 unsigned long vl)
 {
-	task->thread.sve_vl = vl;
+	task->thread.vl[type] = vl;
 }
 
-unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
+unsigned int task_get_vl_onexec(const struct task_struct *task,
+				enum vec_type type)
 {
-	return task->thread.sve_vl_onexec;
+	return task->thread.vl_onexec[type];
 }
 
-void task_set_sve_vl_onexec(struct task_struct *task, unsigned long vl)
+void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
+			unsigned long vl)
 {
-	task->thread.sve_vl_onexec = vl;
+	task->thread.vl_onexec[type] = vl;
 }
 
 /*
@@ -1072,10 +1086,43 @@ void fpsimd_thread_switch(struct task_struct *next)
 	__put_cpu_fpsimd_context();
 }
 
-void fpsimd_flush_thread(void)
+static void fpsimd_flush_thread_vl(enum vec_type type)
 {
 	int vl, supported_vl;
 
+	/*
+	 * Reset the task vector length as required. This is where we
+	 * ensure that all user tasks have a valid vector length
+	 * configured: no kernel task can become a user task without
+	 * an exec and hence a call to this function. By the time the
+	 * first call to this function is made, all early hardware
+	 * probing is complete, so __sve_default_vl should be valid.
+	 * If a bug causes this to go wrong, we make some noise and
+	 * try to fudge thread.sve_vl to a safe value here.
+	 */
+	vl = task_get_vl_onexec(current, type);
+	if (!vl)
+		vl = get_default_vl(type);
+
+	if (WARN_ON(!sve_vl_valid(vl)))
+		vl = SVE_VL_MIN;
+
+	supported_vl = find_supported_vector_length(type, vl);
+	if (WARN_ON(supported_vl != vl))
+		vl = supported_vl;
+
+	task_set_vl(current, type, vl);
+
+	/*
+	 * If the task is not set to inherit, ensure that the vector
+	 * length will be reset by a subsequent exec:
+	 */
+	if (!test_thread_flag(vec_vl_inherit_flag(type)))
+		task_set_vl_onexec(current, type, 0);
+}
+
+void fpsimd_flush_thread(void)
+{
 	if (!system_supports_fpsimd())
 		return;
@@ -1088,37 +1135,7 @@ void fpsimd_flush_thread(void)
 	if (system_supports_sve()) {
 		clear_thread_flag(TIF_SVE);
 		sve_free(current);
-
-		/*
-		 * Reset the task vector length as required.
-		 * This is where we ensure that all user tasks have a valid
-		 * vector length configured: no kernel task can become a user
-		 * task without an exec and hence a call to this function.
-		 * By the time the first call to this function is made, all
-		 * early hardware probing is complete, so __sve_default_vl
-		 * should be valid.
-		 * If a bug causes this to go wrong, we make some noise and
-		 * try to fudge thread.sve_vl to a safe value here.
-		 */
-		vl = task_get_sve_vl_onexec(current);
-		if (!vl)
-			vl = get_sve_default_vl();
-
-		if (WARN_ON(!sve_vl_valid(vl)))
-			vl = SVE_VL_MIN;
-
-		supported_vl = find_supported_vector_length(ARM64_VEC_SVE, vl);
-		if (WARN_ON(supported_vl != vl))
-			vl = supported_vl;
-
-		task_set_sve_vl(current, vl);
-
-		/*
-		 * If the task is not set to inherit, ensure that the vector
-		 * length will be reset by a subsequent exec:
-		 */
-		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
-			task_set_sve_vl_onexec(current, 0);
+		fpsimd_flush_thread_vl(ARM64_VEC_SVE);
 	}
 
 	put_cpu_fpsimd_context();
......
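Taken together, the last two hunks factor the vector length reset out of fpsimd_flush_thread() into the per-type helper, leaving the caller with roughly the following shape after this patch (the unchanged middle of the function is elided here, as in the diff):

/* Approximate post-patch shape; the elided middle is untouched. */
void fpsimd_flush_thread(void)
{
	if (!system_supports_fpsimd())
		return;

	/* ... flush of the task's FPSIMD state, elided above ... */

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);
		fpsimd_flush_thread_vl(ARM64_VEC_SVE);
	}

	put_cpu_fpsimd_context();
}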