提交 d56089f5 编写于 作者: James Morse 提交者: Baolin Wang

ACPI / APEI: Move locking to the notification helper

fix #28612342

commit 3b880cbe4df5dd78a2b2279dbe16db9d193412ca upstream

ghes_copy_tofrom_phys() takes different locks depending on in_nmi().
This doesn't work if there are multiple NMI-like notifications, that
can interrupt each other.

Now that NOTIFY_SEA is always called in the same context, move the
lock-taking to the notification helper. The helper will always know
which lock to take. This avoids ghes_copy_tofrom_phys() taking a guess
based on in_nmi().

This splits NOTIFY_NMI and NOTIFY_SEA to use different locks. All
the other notifications use ghes_proc(), and are called in process
or IRQ context. Move the spin_lock_irqsave() around their ghes_proc()
calls.
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Acked-by: Caspar Zhang <caspar@linux.alibaba.com>
Reviewed-by: luanshi <zhangliguang@linux.alibaba.com>
上级 c5f48328
...@@ -114,11 +114,10 @@ static DEFINE_MUTEX(ghes_list_mutex); ...@@ -114,11 +114,10 @@ static DEFINE_MUTEX(ghes_list_mutex);
* handler, but general ioremap can not be used in atomic context, so * handler, but general ioremap can not be used in atomic context, so
* the fixmap is used instead. * the fixmap is used instead.
* *
* These 2 spinlocks are used to prevent the fixmap entries from being used * This spinlock is used to prevent the fixmap entry from being used
* simultaneously. * simultaneously.
*/ */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_SPINLOCK(ghes_notify_lock_irq);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
static struct gen_pool *ghes_estatus_pool; static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request; static unsigned long ghes_estatus_pool_size_request;
...@@ -287,7 +286,6 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, ...@@ -287,7 +286,6 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
int from_phys) int from_phys)
{ {
void __iomem *vaddr; void __iomem *vaddr;
unsigned long flags = 0;
int in_nmi = in_nmi(); int in_nmi = in_nmi();
u64 offset; u64 offset;
u32 trunk; u32 trunk;
...@@ -295,10 +293,8 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, ...@@ -295,10 +293,8 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
while (len > 0) { while (len > 0) {
offset = paddr - (paddr & PAGE_MASK); offset = paddr - (paddr & PAGE_MASK);
if (in_nmi) { if (in_nmi) {
raw_spin_lock(&ghes_ioremap_lock_nmi);
vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
} else { } else {
spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
} }
trunk = PAGE_SIZE - offset; trunk = PAGE_SIZE - offset;
...@@ -312,10 +308,8 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, ...@@ -312,10 +308,8 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
buffer += trunk; buffer += trunk;
if (in_nmi) { if (in_nmi) {
ghes_iounmap_nmi(); ghes_iounmap_nmi();
raw_spin_unlock(&ghes_ioremap_lock_nmi);
} else { } else {
ghes_iounmap_irq(); ghes_iounmap_irq();
spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
} }
} }
} }
...@@ -729,8 +723,11 @@ static void ghes_add_timer(struct ghes *ghes) ...@@ -729,8 +723,11 @@ static void ghes_add_timer(struct ghes *ghes)
static void ghes_poll_func(struct timer_list *t) static void ghes_poll_func(struct timer_list *t)
{ {
struct ghes *ghes = from_timer(ghes, t, timer); struct ghes *ghes = from_timer(ghes, t, timer);
unsigned long flags;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
ghes_proc(ghes); ghes_proc(ghes);
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
if (!(ghes->flags & GHES_EXITING)) if (!(ghes->flags & GHES_EXITING))
ghes_add_timer(ghes); ghes_add_timer(ghes);
} }
...@@ -738,9 +735,12 @@ static void ghes_poll_func(struct timer_list *t) ...@@ -738,9 +735,12 @@ static void ghes_poll_func(struct timer_list *t)
static irqreturn_t ghes_irq_func(int irq, void *data) static irqreturn_t ghes_irq_func(int irq, void *data)
{ {
struct ghes *ghes = data; struct ghes *ghes = data;
unsigned long flags;
int rc; int rc;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
rc = ghes_proc(ghes); rc = ghes_proc(ghes);
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
if (rc) if (rc)
return IRQ_NONE; return IRQ_NONE;
...@@ -751,14 +751,17 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event, ...@@ -751,14 +751,17 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
void *data) void *data)
{ {
struct ghes *ghes; struct ghes *ghes;
unsigned long flags;
int ret = NOTIFY_DONE; int ret = NOTIFY_DONE;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) { list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes)) if (!ghes_proc(ghes))
ret = NOTIFY_OK; ret = NOTIFY_OK;
} }
rcu_read_unlock(); rcu_read_unlock();
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret; return ret;
} }
...@@ -912,7 +915,14 @@ static LIST_HEAD(ghes_sea); ...@@ -912,7 +915,14 @@ static LIST_HEAD(ghes_sea);
*/ */
int ghes_notify_sea(void) int ghes_notify_sea(void)
{ {
return ghes_in_nmi_spool_from_list(&ghes_sea); static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
int rv;
raw_spin_lock(&ghes_notify_lock_sea);
rv = ghes_in_nmi_spool_from_list(&ghes_sea);
raw_spin_unlock(&ghes_notify_lock_sea);
return rv;
} }
static void ghes_sea_add(struct ghes *ghes) static void ghes_sea_add(struct ghes *ghes)
...@@ -945,13 +955,16 @@ static LIST_HEAD(ghes_nmi); ...@@ -945,13 +955,16 @@ static LIST_HEAD(ghes_nmi);
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{ {
static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
int ret = NMI_DONE; int ret = NMI_DONE;
if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
return ret; return ret;
raw_spin_lock(&ghes_notify_lock_nmi);
if (!ghes_in_nmi_spool_from_list(&ghes_nmi)) if (!ghes_in_nmi_spool_from_list(&ghes_nmi))
ret = NMI_HANDLED; ret = NMI_HANDLED;
raw_spin_unlock(&ghes_notify_lock_nmi);
atomic_dec(&ghes_in_nmi); atomic_dec(&ghes_in_nmi);
return ret; return ret;
...@@ -993,6 +1006,7 @@ static int ghes_probe(struct platform_device *ghes_dev) ...@@ -993,6 +1006,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
{ {
struct acpi_hest_generic *generic; struct acpi_hest_generic *generic;
struct ghes *ghes = NULL; struct ghes *ghes = NULL;
unsigned long flags;
int rc = -EINVAL; int rc = -EINVAL;
...@@ -1095,7 +1109,9 @@ static int ghes_probe(struct platform_device *ghes_dev) ...@@ -1095,7 +1109,9 @@ static int ghes_probe(struct platform_device *ghes_dev)
ghes_edac_register(ghes, &ghes_dev->dev); ghes_edac_register(ghes, &ghes_dev->dev);
/* Handle any pending errors right away */ /* Handle any pending errors right away */
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
ghes_proc(ghes); ghes_proc(ghes);
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return 0; return 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册