提交 f22d4c2e 编写于 作者: J Julien Thierry 提交者: Xie XiuQi

arm64: daifflags: Include PMR in daifflags restore operations

hulk inclusion
category: feature
bugzilla: 9291
CVE: NA

ported from https://lore.kernel.org/patchwork/patch/1037485/

--------------------------------

The addition of PMR should not bypass the semantics of daifflags.

When DA_F are set, the I bit is also set, as no interrupts (even of higher
priority) are allowed.

When DA_F are cleared, I bit is cleared and interrupt enabling/disabling
goes through ICC_PMR_EL1.
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 04435f11
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/irqflags.h> #include <linux/irqflags.h>
#include <asm/cpufeature.h>
#define DAIF_PROCCTX 0 #define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT #define DAIF_PROCCTX_NOIRQ PSR_I_BIT
...@@ -36,11 +38,14 @@ static inline unsigned long local_daif_save(void) ...@@ -36,11 +38,14 @@ static inline unsigned long local_daif_save(void)
{ {
unsigned long flags; unsigned long flags;
asm volatile( flags = read_sysreg(daif);
"mrs %0, daif // local_daif_save\n"
: "=r" (flags) if (system_uses_irq_prio_masking()) {
: /* If IRQs are masked with PMR, reflect it in the flags */
: "memory"); if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
flags |= PSR_I_BIT;
}
local_daif_mask(); local_daif_mask();
return flags; return flags;
...@@ -48,14 +53,46 @@ static inline unsigned long local_daif_save(void) ...@@ -48,14 +53,46 @@ static inline unsigned long local_daif_save(void)
static inline void local_daif_restore(unsigned long flags) static inline void local_daif_restore(unsigned long flags)
{ {
if (!arch_irqs_disabled_flags(flags)) bool irq_disabled = flags & PSR_I_BIT;
if (!irq_disabled) {
trace_hardirqs_on(); trace_hardirqs_on();
asm volatile(
"msr daif, %0 // local_daif_restore" if (system_uses_irq_prio_masking())
: arch_local_irq_enable();
: "r" (flags) } else if (!(flags & PSR_A_BIT)) {
: "memory"); /*
if (arch_irqs_disabled_flags(flags)) * If interrupts are disabled but we can take
* asynchronous errors, we can take NMIs
*/
if (system_uses_irq_prio_masking()) {
flags &= ~PSR_I_BIT;
/*
* There has been concern that the write to daif
* might be reordered before this write to PMR.
* From the ARM ARM DDI 0487D.a, section D1.7.1
* "Accessing PSTATE fields":
* Writes to the PSTATE fields have side-effects on
* various aspects of the PE operation. All of these
* side-effects are guaranteed:
* - Not to be visible to earlier instructions in
* the execution stream.
* - To be visible to later instructions in the
* execution stream
*
* Also, writes to PMR are self-synchronizing, so no
* interrupts with a lower priority than PMR is signaled
* to the PE after the write.
*
* So we don't need additional synchronization here.
*/
arch_local_irq_disable();
}
}
write_sysreg(flags, daif);
if (irq_disabled)
trace_hardirqs_off(); trace_hardirqs_off();
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册