diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 4986bf5ffd297c4f41ca876fe78b4b2bb29f0e2a..cd9a98bc8f606197a15b70d729aa66a0431f3e0e 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -426,4 +426,24 @@
 	ld_d	31, THREAD_FPR31, \thread
 	.endm

+	.macro	msa_init_upper	wd
+#ifdef CONFIG_64BIT
+	insert_d \wd, 1
+#else
+	insert_w \wd, 2
+	insert_w \wd, 3
+#endif
+	.if	31-\wd
+	msa_init_upper	(\wd+1)
+	.endif
+	.endm
+
+	.macro	msa_init_all_upper
+	.set	push
+	.set	noat
+	not	$1, zero
+	msa_init_upper	0
+	.set	pop
+	.endm
+
 #endif /* _ASM_ASMMACRO_H */
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index e80e85c1334f56250ab2b78e69bba6611ae508b3..fe25a17bc7834336edcae558f19d9c503797d704 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -16,6 +16,7 @@

 extern void _save_msa(struct task_struct *);
 extern void _restore_msa(struct task_struct *);
+extern void _init_msa_upper(void);

 static inline void enable_msa(void)
 {
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 1a1aef04312d370e9c9601365b3428a78ac53c17..4c4ec1812420b873663d682d706860f394d7db00 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -144,6 +144,11 @@ LEAF(_restore_msa)
 	jr	ra
 	END(_restore_msa)

+LEAF(_init_msa_upper)
+	msa_init_all_upper
+	jr	ra
+	END(_init_msa_upper)
+
 #endif

 /*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 649c151fe1dbcb77cc6165da5a504ef828f5eac9..1ed84577d3e318c50b6a1546fafd39440c6ddc8f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1089,13 +1089,15 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,

 static int enable_restore_fp_context(int msa)
 {
-	int err, was_fpu_owner;
+	int err, was_fpu_owner, prior_msa;

 	if (!used_math()) {
 		/* First time FP context user. */
 		err = init_fpu();
-		if (msa && !err)
+		if (msa && !err) {
 			enable_msa();
+			_init_msa_upper();
+		}
 		if (!err)
 			set_used_math();
 		return err;
@@ -1147,18 +1149,37 @@ static int enable_restore_fp_context(int msa)
 	/*
 	 * If this is the first time that the task is using MSA and it has
 	 * previously used scalar FP in this time slice then we already nave
-	 * FP context which we shouldn't clobber.
+	 * FP context which we shouldn't clobber. We do however need to clear
+	 * the upper 64b of each vector register so that this task has no
+	 * opportunity to see data left behind by another.
 	 */
-	if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+	if (!prior_msa && was_fpu_owner) {
+		_init_msa_upper();
 		return 0;
+	}

-	/* We need to restore the vector context. */
-	restore_msa(current);
+	if (!prior_msa) {
+		/*
+		 * Restore the least significant 64b of each vector register
+		 * from the existing scalar FP context.
+		 */
+		_restore_fp(current);

-	/* Restore the scalar FP control & status register */
-	if (!was_fpu_owner)
-		asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+		/*
+		 * The task has not formerly used MSA, so clear the upper 64b
+		 * of each vector register such that it cannot see data left
+		 * behind by another task.
+		 */
+		_init_msa_upper();
+	} else {
+		/* We need to restore the vector context. */
+		restore_msa(current);
+		/* Restore the scalar FP control & status register */
+		if (!was_fpu_owner)
+			asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+	}

 	return 0;
 }
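
Note on the asmmacro.h hunk: msa_init_upper recurses at assembly time via the ".if 31-\wd" guard, so msa_init_all_upper unrolls into one insert per vector register $w0..$w31. The insert_w/insert_d wrappers defined earlier in asmmacro.h emit raw encodings that (as the ".set noat" / "not $1, zero" sequence implies) use $at ($1) as their implied source GPR, which is why the macro first loads $1 with all ones. A rough sketch of the CONFIG_64BIT expansion, for illustration only and not part of the patch:

	.set	push
	.set	noat
	not	$1, zero		# $at = ~0, the fill pattern
	insert_d 0, 1			# $w0 doubleword 1 (bits 127..64) = $at
	insert_d 1, 1			# $w1 doubleword 1 = $at
	...				# recursion continues, ending with...
	insert_d 31, 1			# $w31 doubleword 1 = $at
	.set	pop

On 32-bit kernels a GPR supplies only 32 bits, so each register instead takes two insert_w writes covering word elements 2 and 3. The exact fill value is irrelevant to the goal; overwriting with any constant ensures the task cannot observe data left behind by another.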
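
For review purposes, the three paths the reworked enable_restore_fp_context() takes for a task that already used math can be condensed as below. This is a sketch of the patch's control flow (the patch itself returns early from the first case rather than using else-if), not the literal kernel code:

	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);

	if (!prior_msa && was_fpu_owner) {
		/* Live scalar FP state is already in the registers: keep
		 * the low 64b, overwrite only the upper halves. */
		_init_msa_upper();
	} else if (!prior_msa) {
		/* Saved scalar FP context exists but is not live: reload
		 * the low 64b from it, then overwrite the upper halves. */
		_restore_fp(current);
		_init_msa_upper();
	} else {
		/* Full 128b MSA context exists: restore all of it, and
		 * FCSR too if the FPU was not already ours. */
		restore_msa(current);
	}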