Commit 5d176f75, authored by Cyril Bur, committed by Michael Ellerman

powerpc: tm: Enable transactional memory (TM) lazily for userspace

Currently the MSR TM bit is always set if the hardware is TM capable.
This adds extra overhead as it means the TM SPRs (TFHAR, TEXASR and
TFIAR) must be swapped for each process regardless of whether it uses TM.

For processes that don't use TM, the TM MSR bit can be turned off,
allowing the kernel to avoid the expensive swap of the TM registers.

If a thread does use TM, a TM Unavailable exception will occur; the
kernel will then enable MSR_TM and leave it set for some time afterwards.
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent 172f7aaa
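
The lazy path is driven entirely by userspace's first transactional instruction: with this change a process starts with MSR[TM] clear, so the first tbegin. takes a TM Unavailable exception, and the tm_unavailable() hunk below turns MSR_TM on and restores the TM SPRs before returning. A minimal way to trigger it is sketched here, assuming GCC's PowerPC HTM builtins (built with -mhtm, e.g. gcc -mhtm tm_trigger.c) on TM-capable hardware; the program is illustrative only and not part of the patch.

/*
 * Illustrative userspace trigger, not part of the patch. Assumes GCC's
 * PowerPC HTM builtins and a TM-capable CPU; build with -mhtm.
 */
#include <stdio.h>

static volatile int counter;

int main(void)
{
	/*
	 * First transactional instruction in this process: it executes
	 * with MSR[TM] clear, traps into tm_unavailable(), which sets
	 * MSR_TM and restores TFHAR/TEXASR/TFIAR, then resumes here.
	 * A nonzero return from __builtin_tbegin() means the
	 * transaction started.
	 */
	if (__builtin_tbegin(0)) {
		counter++;		/* transactional store */
		__builtin_tend(0);
	} else {
		counter++;		/* fallback path if the transaction aborts */
	}

	printf("counter = %d\n", counter);
	return 0;
}

The new load_tm field is a u8 that is only ever incremented in this patch (in tm_unavailable() and on each context switch of a TM-enabled thread); __switch_to_tm() drops MSR_TM again only once the counter is back at zero with no transaction active, which appears to be what "leave it set for some time afterwards" refers to.
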
@@ -257,6 +257,7 @@ struct thread_struct {
 	int	used_spe;	/* set if process has used spe */
 #endif /* CONFIG_SPE */
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u8	load_tm;
 	u64	tm_tfhar;	/* Transaction fail handler addr */
 	u64	tm_texasr;	/* Transaction exception & summary */
 	u64	tm_tfiar;	/* Transaction fail instr address reg */
...
@@ -812,6 +812,12 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
 }
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+
+static inline bool tm_enabled(struct task_struct *tsk)
+{
+	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
+}
+
 static void tm_reclaim_thread(struct thread_struct *thr,
 			      struct thread_info *ti, uint8_t cause)
 {
@@ -892,6 +898,9 @@ void tm_recheckpoint(struct thread_struct *thread,
 {
 	unsigned long flags;
 
+	if (!(thread->regs->msr & MSR_TM))
+		return;
+
 	/* We really can't be interrupted here as the TEXASR registers can't
 	 * change and later in the trecheckpoint code, we have a userspace R1.
 	 * So let's hard disable over this region.
@@ -924,7 +933,7 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
 	 * unavailable later, we are unable to determine which set of FP regs
 	 * need to be restored.
 	 */
-	if (!new->thread.regs)
+	if (!tm_enabled(new))
 		return;
 
 	if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
@@ -955,8 +964,16 @@ static inline void __switch_to_tm(struct task_struct *prev,
 		struct task_struct *new)
 {
 	if (cpu_has_feature(CPU_FTR_TM)) {
-		tm_enable();
-		tm_reclaim_task(prev);
+		if (tm_enabled(prev) || tm_enabled(new))
+			tm_enable();
+
+		if (tm_enabled(prev)) {
+			prev->thread.load_tm++;
+			tm_reclaim_task(prev);
+			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
+				prev->thread.regs->msr &= ~MSR_TM;
+		}
+
 		tm_recheckpoint_new_task(new);
 	}
 }
@@ -1393,6 +1410,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	 * transitions the CPU out of TM mode. Hence we need to call
 	 * tm_recheckpoint_new_task() (on the same task) to restore the
 	 * checkpointed state back and the TM mode.
+	 *
+	 * Can't pass dst because it isn't ready. Doesn't matter, passing
+	 * dst is only important for __switch_to()
 	 */
 	__switch_to_tm(src, src);
@@ -1636,8 +1656,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.used_spe = 0;
 #endif /* CONFIG_SPE */
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	if (cpu_has_feature(CPU_FTR_TM))
-		regs->msr |= MSR_TM;
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
...
@@ -1392,6 +1392,15 @@ void vsx_unavailable_exception(struct pt_regs *regs)
 #ifdef CONFIG_PPC64
 static void tm_unavailable(struct pt_regs *regs)
 {
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (user_mode(regs)) {
+		current->thread.load_tm++;
+		regs->msr |= MSR_TM;
+		tm_enable();
+		tm_restore_sprs(&current->thread);
+		return;
+	}
+#endif
 	pr_emerg("Unrecoverable TM Unavailable Exception "
 			"%lx at %lx\n", regs->trap, regs->nip);
 	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
...