Commit c2f1181d authored by Marc Zyngier, committed by Xie XiuQi

arm64: Force SSBS on context switch

mainline inclusion
from mainline-v5.3-rc2
commit cbdf8a189a66001c36007bf0f5c975d0376c5c3a
category: feature
bugzilla: 20806
CVE: NA

-------------------------------------------------

On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.
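
For reference, the two PSTATE/SPSR masks manipulated by the patch correspond to the bit positions below; a minimal excerpt, assuming the mainline definitions in the arm64 ptrace headers (not part of this change):

/* Not part of this patch: bit positions assumed from mainline ptrace.h. */
#define PSR_SSBS_BIT		0x00001000	/* PSTATE[12], native AArch64 tasks */
#define PSR_AA32_SSBS_BIT	0x00800000	/* SPSR[23] in the AArch32 layout */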

Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
[will: inverted logic and added comments]
Signed-off-by: Will Deacon <will@kernel.org>
Conflicts:
  arch/arm64/kernel/process.c
[yyl: adjust context]
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 0c708103
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -180,6 +180,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -187,7 +197,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -206,7 +216,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -411,7 +411,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pmr_save = GIC_PRIO_IRQON;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
@@ -452,6 +452,32 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	struct pt_regs *regs = task_pt_regs(next);
+
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -480,6 +506,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	contextidr_thread_switch(next);
 	entry_task_switch(next);
 	uao_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
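
For context, and not part of this patch: the TIF_SSBD flag tested by ssbs_thread_switch() is normally set when a task requests the Speculative Store Bypass mitigation through the speculation-control prctl() interface. A minimal userspace sketch of that request, assuming the standard PR_SPEC_* constants from <linux/prctl.h>:

/*
 * Hypothetical userspace sketch: opt this task in to the Speculative Store
 * Bypass mitigation. On arm64 this sets TIF_SSBD for the task, which is the
 * flag ssbs_thread_switch() checks before forcing SSBS back on.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Ask the kernel to disable speculative store bypass for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("prctl(PR_SET_SPECULATION_CTRL)");

	/* Query the current state; the return value encodes PR_SPEC_* flags. */
	printf("SSB state: 0x%x\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}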