Commit 8c4890d1 authored by Peter Zijlstra, committed by Borislav Petkov

smp, irq_work: Continue smp_call_function*() and irq_work*() integration

Instead of relying on BUG_ON() to ensure the various data structures
line up, use a bunch of horrible unions to make it all automatic.

Much of the union magic is to ensure irq_work and smp_call_function do
not (yet) see the members of their respective data structures change
name.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lkml.kernel.org/r/20200622100825.844455025@infradead.org
Parent 739f70b4
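
The union trick is easier to see outside the kernel. Below is a minimal, hypothetical userspace sketch of the idea (names such as shared_node and legacy_entry are invented for illustration, not taken from the patch): an anonymous union overlays the shared head type with the legacy member names, so existing users keep their old spelling while the compiler, rather than a BUILD_BUG_ON(), keeps the layouts in sync.

/* Standalone sketch, not kernel code; compile with any C11 compiler. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };	/* stand-in for llist_node */

/* The shared head type that every queue entry starts with. */
struct shared_node {
	struct list_node link;
	union {
		unsigned int u_flags;	/* read as a plain word */
		int          a_flags;	/* stand-in for atomic_t */
	};
};

/* A legacy user keeps its old member names via an anonymous union. */
struct legacy_entry {
	union {
		struct shared_node node;
		struct {			/* must mirror shared_node */
			struct list_node link;
			unsigned int flags;
		};
	};
	void (*func)(struct legacy_entry *);
};

int main(void)
{
	/* What used to be a BUILD_BUG_ON() is now a property of the union. */
	static_assert(offsetof(struct legacy_entry, flags) ==
		      offsetof(struct legacy_entry, node.u_flags),
		      "layouts diverged");

	struct legacy_entry e = { .flags = 0x30 };	/* e.g. a type tag */

	/* Both spellings alias the same storage. */
	printf("e.flags=%#x  e.node.u_flags=%#x\n", e.flags, e.node.u_flags);
	return 0;
}
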
include/linux/irq_work.h
@@ -2,7 +2,7 @@
 #ifndef _LINUX_IRQ_WORK_H
 #define _LINUX_IRQ_WORK_H
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 /*
  * An entry can be in one of four states:
@@ -13,24 +13,14 @@
  * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
  */
-/* flags share CSD_FLAG_ space */
-#define IRQ_WORK_PENDING	BIT(0)
-#define IRQ_WORK_BUSY		BIT(1)
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY		BIT(2)
-/* Run hard IRQ context, even on RT */
-#define IRQ_WORK_HARD_IRQ	BIT(3)
-#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-/*
- * structure shares layout with single_call_data_t.
- */
 struct irq_work {
-	struct llist_node llnode;
-	atomic_t flags;
+	union {
+		struct __call_single_node node;
+		struct {
+			struct llist_node llnode;
+			atomic_t flags;
+		};
+	};
 	void (*func)(struct irq_work *);
 };
include/linux/sched.h
@@ -654,11 +654,8 @@ struct task_struct {
 	unsigned int			ptrace;
 #ifdef CONFIG_SMP
-	struct {
-		struct llist_node	wake_entry;
-		unsigned int		wake_entry_type;
-	};
 	int				on_cpu;
+	struct __call_single_node	wake_entry;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/* Current CPU: */
 	unsigned int			cpu;
include/linux/smp.h
@@ -12,29 +12,22 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 typedef void (*smp_call_func_t)(void *info);
 typedef bool (*smp_cond_func_t)(int cpu, void *info);
-enum {
-	CSD_FLAG_LOCK		= 0x01,
-	/* IRQ_WORK_flags */
-	CSD_TYPE_ASYNC		= 0x00,
-	CSD_TYPE_SYNC		= 0x10,
-	CSD_TYPE_IRQ_WORK	= 0x20,
-	CSD_TYPE_TTWU		= 0x30,
-	CSD_FLAG_TYPE_MASK	= 0xF0,
-};
-/*
- * structure shares (partial) layout with struct irq_work
- */
 struct __call_single_data {
-	struct llist_node llist;
-	unsigned int flags;
+	union {
+		struct __call_single_node	node;
+		struct {
+			struct llist_node	llist;
+			unsigned int		flags;
+		};
+	};
 	smp_call_func_t func;
 	void *info;
 };
include/linux/smp_types.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_TYPES_H
#define __LINUX_SMP_TYPES_H

#include <linux/llist.h>

enum {
	CSD_FLAG_LOCK		= 0x01,

	IRQ_WORK_PENDING	= 0x01,
	IRQ_WORK_BUSY		= 0x02,
	IRQ_WORK_LAZY		= 0x04, /* No IPI, wait for tick */
	IRQ_WORK_HARD_IRQ	= 0x08, /* IRQ context on PREEMPT_RT */
	IRQ_WORK_CLAIMED	= (IRQ_WORK_PENDING | IRQ_WORK_BUSY),

	CSD_TYPE_ASYNC		= 0x00,
	CSD_TYPE_SYNC		= 0x10,
	CSD_TYPE_IRQ_WORK	= 0x20,
	CSD_TYPE_TTWU		= 0x30,
	CSD_FLAG_TYPE_MASK	= 0xF0,
};

/*
 * struct __call_single_node is the primary type on
 * smp.c:call_single_queue.
 *
 * flush_smp_call_function_queue() only reads the type from
 * __call_single_node::u_flags as a regular load, the above
 * (anonymous) enum defines all the bits of this word.
 *
 * Other bits are not modified until the type is known.
 *
 * CSD_TYPE_SYNC/ASYNC:
 *	struct {
 *		struct llist_node node;
 *		unsigned int flags;
 *		smp_call_func_t func;
 *		void *info;
 *	};
 *
 * CSD_TYPE_IRQ_WORK:
 *	struct {
 *		struct llist_node node;
 *		atomic_t flags;
 *		void (*func)(struct irq_work *);
 *	};
 *
 * CSD_TYPE_TTWU:
 *	struct {
 *		struct llist_node node;
 *		unsigned int flags;
 *	};
 *
 */
struct __call_single_node {
	struct llist_node	llist;
	union {
		unsigned int	u_flags;
		atomic_t	a_flags;
	};
};

#endif /* __LINUX_SMP_TYPES_H */
kernel/sched/core.c
@@ -2293,7 +2293,7 @@ void sched_ttwu_pending(void *arg)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	llist_for_each_entry_safe(p, t, llist, wake_entry) {
+	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
 		if (WARN_ON_ONCE(p->on_cpu))
 			smp_cond_load_acquire(&p->on_cpu, !VAL);
@@ -2329,7 +2329,7 @@ static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
 	WRITE_ONCE(rq->ttwu_pending, 1);
-	__smp_call_single_queue(cpu, &p->wake_entry);
+	__smp_call_single_queue(cpu, &p->wake_entry.llist);
 }
 void wake_up_if_idle(int cpu)
@@ -2786,7 +2786,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 	init_numa_balancing(clone_flags, p);
 #ifdef CONFIG_SMP
-	p->wake_entry_type = CSD_TYPE_TTWU;
+	p->wake_entry.u_flags = CSD_TYPE_TTWU;
 #endif
 }
kernel/smp.c
@@ -669,24 +669,6 @@ void __init smp_init(void)
 {
 	int num_nodes, num_cpus;
-	/*
-	 * Ensure struct irq_work layout matches so that
-	 * flush_smp_call_function_queue() can do horrible things.
-	 */
-	BUILD_BUG_ON(offsetof(struct irq_work, llnode) !=
-		     offsetof(struct __call_single_data, llist));
-	BUILD_BUG_ON(offsetof(struct irq_work, func) !=
-		     offsetof(struct __call_single_data, func));
-	BUILD_BUG_ON(offsetof(struct irq_work, flags) !=
-		     offsetof(struct __call_single_data, flags));
-	/*
-	 * Assert the CSD_TYPE_TTWU layout is similar enough
-	 * for task_struct to be on the @call_single_queue.
-	 */
-	BUILD_BUG_ON(offsetof(struct task_struct, wake_entry_type) - offsetof(struct task_struct, wake_entry) !=
-		     offsetof(struct __call_single_data, flags) - offsetof(struct __call_single_data, llist));
 	idle_threads_init();
 	cpuhp_threads_init();
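
The comment block added in smp_types.h describes how flush_smp_call_function_queue() is expected to look only at the type bits of the shared flags word before it knows which full layout an entry uses. Below is a hedged, standalone sketch of that dispatch pattern (userspace C; the queue walk and the entry/handler names are invented for illustration, only the CSD_TYPE_* values are taken from the patch). The point is that every entry type begins with the same head, so the type word can be read before anything else about the entry is known.

/* Standalone sketch, not the kernel's implementation. */
#include <stdio.h>

enum {
	CSD_TYPE_ASYNC		= 0x00,
	CSD_TYPE_SYNC		= 0x10,
	CSD_TYPE_IRQ_WORK	= 0x20,
	CSD_TYPE_TTWU		= 0x30,
	CSD_FLAG_TYPE_MASK	= 0xF0,
};

/* Shared head: a link plus the flags word carrying the type bits. */
struct node { struct node *next; unsigned int u_flags; };

/* Three hypothetical entry layouts, each starting with the shared head. */
struct csd       { struct node head; void (*func)(void *info); void *info; };
struct irq_like  { struct node head; void (*func)(struct irq_like *); };
struct task_like { struct node head; /* wakeup state would follow */ };

static void run_csd(struct csd *c)           { c->func(c->info); }
static void run_irq_work(struct irq_like *w) { w->func(w); }
static void wake_task(struct task_like *t)   { (void)t; puts("would wake task"); }

/* Walk a singly linked queue and dispatch on the type bits only. */
static void flush_queue(struct node *head)
{
	for (struct node *n = head; n; n = n->next) {
		switch (n->u_flags & CSD_FLAG_TYPE_MASK) {
		case CSD_TYPE_SYNC:
		case CSD_TYPE_ASYNC:
			run_csd((struct csd *)n);
			break;
		case CSD_TYPE_IRQ_WORK:
			run_irq_work((struct irq_like *)n);
			break;
		case CSD_TYPE_TTWU:
			wake_task((struct task_like *)n);
			break;
		}
	}
}

static void hello(void *info) { printf("csd: %s\n", (char *)info); }

int main(void)
{
	struct task_like t = { .head = { .u_flags = CSD_TYPE_TTWU } };
	struct csd c = {
		.head = { .next = &t.head, .u_flags = CSD_TYPE_ASYNC },
		.func = hello, .info = "async call",
	};
	flush_queue(&c.head);	/* runs the csd callback, then "wakes" t */
	return 0;
}
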