Commit 4f9ed36f authored by Peter Zijlstra, committed by Zheng Zengkai

smp: Cleanup smp_call_function*()

mainline inclusion
from mainline-v5.11-rc1
commit 545b8c8d
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZV2C
CVE: NA

-------------------------------------------------

Get rid of the __call_single_node union and cleanup the API a little
to avoid external code relying on the structure layout as much.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>

conflict:
	kernel/debug/debug_core.c
	kernel/sched/core.c
	kernel/smp.c: fix csd_lock_wait_getcpu() csd->node.dst
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 8e05ed1e
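The conversion pattern this cleanup applies can be sketched outside the diff. The snippet below is illustrative only: my_ipi_handler, my_csd and kick_remote_cpu are made-up names, not part of this commit; only call_single_data_t, INIT_CSD() and smp_call_function_single_async() come from the patched API.

#include <linux/smp.h>

/* Hypothetical callback; runs on the target CPU. */
static void my_ipi_handler(void *info)
{
	/* ... act on info ... */
}

static call_single_data_t my_csd;

static void kick_remote_cpu(int cpu, void *data)
{
	/*
	 * Old style (what the hunks below remove): poke csd->func,
	 * csd->info and csd->flags by hand, relying on the struct layout.
	 * New style: one initializer, no layout knowledge needed.
	 */
	INIT_CSD(&my_csd, my_ipi_handler, data);
	smp_call_function_single_async(cpu, &my_csd);
}
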
...@@ -702,7 +702,6 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
-static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
 static struct cpumask backtrace_csd_busy;
 static void handle_backtrace(void *info)
...@@ -711,6 +710,9 @@ static void handle_backtrace(void *info)
 	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
 }
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
+	CSD_INIT(handle_backtrace, NULL);
 static void raise_backtrace(cpumask_t *mask)
 {
 	call_single_data_t *csd;
...@@ -730,7 +732,6 @@ static void raise_backtrace(cpumask_t *mask)
 		}
 		csd = &per_cpu(backtrace_csd, cpu);
-		csd->func = handle_backtrace;
 		smp_call_function_single_async(cpu, csd);
 	}
 }
......
...@@ -687,36 +687,23 @@ EXPORT_SYMBOL(flush_tlb_one);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
-void tick_broadcast(const struct cpumask *mask)
-{
-	call_single_data_t *csd;
-	int cpu;
-	for_each_cpu(cpu, mask) {
-		csd = &per_cpu(tick_broadcast_csd, cpu);
-		smp_call_function_single_async(cpu, csd);
-	}
-}
 static void tick_broadcast_callee(void *info)
 {
 	tick_receive_broadcast();
 }
-static int __init tick_broadcast_init(void)
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
+	CSD_INIT(tick_broadcast_callee, NULL);
+void tick_broadcast(const struct cpumask *mask)
 {
 	call_single_data_t *csd;
 	int cpu;
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu, mask) {
 		csd = &per_cpu(tick_broadcast_csd, cpu);
-		csd->func = tick_broadcast_callee;
+		smp_call_function_single_async(cpu, csd);
 	}
-	return 0;
 }
-early_initcall(tick_broadcast_init);
 #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
...@@ -179,9 +179,7 @@ static void zpci_handle_fallback_irq(void)
 		if (atomic_inc_return(&cpu_data->scheduled) > 1)
 			continue;
-		cpu_data->csd.func = zpci_handle_remote_irq;
-		cpu_data->csd.info = &cpu_data->scheduled;
-		cpu_data->csd.flags = 0;
+		INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
 		smp_call_function_single_async(cpu, &cpu_data->csd);
 	}
 }
......
...@@ -74,10 +74,9 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	init_completion(&cmd.done);
 	for (; count; count -= 16) {
-		call_single_data_t csd = {
-			.func = cpuid_smp_cpuid,
-			.info = &cmd,
-		};
+		call_single_data_t csd;
+		INIT_CSD(&csd, cpuid_smp_cpuid, &cmd);
 		cmd.regs.eax = pos;
 		cmd.regs.ecx = pos >> 32;
......
...@@ -169,12 +169,11 @@ static void __wrmsr_safe_on_cpu(void *info)
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
 	struct msr_info_completion rv;
-	call_single_data_t csd = {
-		.func = __rdmsr_safe_on_cpu,
-		.info = &rv,
-	};
+	call_single_data_t csd;
 	int err;
+	INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);
 	memset(&rv, 0, sizeof(rv));
 	init_completion(&rv.done);
 	rv.msr.msr_no = msr_no;
......
...@@ -672,9 +672,7 @@ bool blk_mq_complete_request_remote(struct request *rq)
 		return false;
 	if (blk_mq_complete_need_ipi(rq)) {
-		rq->csd.func = __blk_mq_complete_request_remote;
-		rq->csd.info = rq;
-		rq->csd.flags = 0;
+		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
 		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
 	} else {
 		if (rq->q->nr_hw_queues > 1)
......
...@@ -674,8 +674,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
 	coupled->refcnt++;
 	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
-	csd->func = cpuidle_coupled_handle_poke;
-	csd->info = (void *)(unsigned long)dev->cpu;
+	INIT_CSD(csd, cpuidle_coupled_handle_poke, (void *)(unsigned long)dev->cpu);
 	return 0;
 }
......
...@@ -729,13 +729,8 @@ static void liquidio_napi_drv_callback(void *arg)
 	    droq->cpu_id == this_cpu) {
 		napi_schedule_irqoff(&droq->napi);
 	} else {
-		call_single_data_t *csd = &droq->csd;
-		csd->func = napi_schedule_wrapper;
-		csd->info = &droq->napi;
-		csd->flags = 0;
-		smp_call_function_single_async(droq->cpu_id, csd);
+		INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
+		smp_call_function_single_async(droq->cpu_id, &droq->csd);
 	}
 }
......
...@@ -21,24 +21,23 @@ typedef bool (*smp_cond_func_t)(int cpu, void *info);
  * structure shares (partial) layout with struct irq_work
  */
 struct __call_single_data {
-	union {
-		struct __call_single_node node;
-		struct {
-			struct llist_node llist;
-			unsigned int flags;
-#ifdef CONFIG_64BIT
-			u16 src, dst;
-#endif
-		};
-	};
+	struct __call_single_node node;
 	smp_call_func_t func;
 	void *info;
 };
+#define CSD_INIT(_func, _info) \
+	(struct __call_single_data){ .func = (_func), .info = (_info), }
 /* Use __aligned() to avoid to use 2 cache lines for 1 csd */
 typedef struct __call_single_data call_single_data_t
 	__aligned(sizeof(struct __call_single_data));
+#define INIT_CSD(_csd, _func, _info) \
+do { \
+	*(_csd) = CSD_INIT((_func), (_info)); \
+} while (0)
 /*
  * Enqueue a llist_node on the call_single_queue; be very careful, read
  * flush_smp_call_function_queue() in detail.
......
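It may help to see the two helpers added in the hunk above used side by side. This is a minimal sketch under stated assumptions: my_callback, my_csd and my_setup are hypothetical names introduced only for illustration, not identifiers from the patch.

#include <linux/percpu-defs.h>
#include <linux/smp.h>

static void my_callback(void *info)
{
	/* ... */
}

/* Compile-time form: CSD_INIT() is a compound literal, so it can sit in
 * a static initializer, as the MIPS and kgdb hunks in this commit do. */
static DEFINE_PER_CPU(call_single_data_t, my_csd) =
	CSD_INIT(my_callback, NULL);

/* Run-time form: INIT_CSD() assigns the same literal to an existing csd. */
static void my_setup(call_single_data_t *csd, void *info)
{
	INIT_CSD(csd, my_callback, info);
}
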
...@@ -225,8 +225,6 @@ NOKPROBE_SYMBOL(kgdb_skipexception);
  * Default (weak) implementation for kgdb_roundup_cpus
  */
-static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
 void __weak kgdb_call_nmi_hook(void *ignored)
 {
 	/*
...@@ -241,6 +239,9 @@ void __weak kgdb_call_nmi_hook(void *ignored)
 }
 NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
+static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
+	CSD_INIT(kgdb_call_nmi_hook, NULL);
 void kgdb_smp_call_nmi_hook(void)
 {
 	call_single_data_t *csd;
...@@ -267,7 +268,6 @@ void kgdb_smp_call_nmi_hook(void)
 			continue;
 		kgdb_info[cpu].rounding_up = true;
-		csd->func = kgdb_call_nmi_hook;
 		ret = smp_call_function_single_async(cpu, csd);
 		if (ret)
 			kgdb_info[cpu].rounding_up = false;
......
...@@ -320,14 +320,6 @@ void update_rq_clock(struct rq *rq)
 	update_rq_clock_task(rq, delta);
 }
-static inline void
-rq_csd_init(struct rq *rq, struct __call_single_data *csd, smp_call_func_t func)
-{
-	csd->flags = 0;
-	csd->func = func;
-	csd->info = rq;
-}
 #ifdef CONFIG_SCHED_HRTICK
 /*
  * Use HR-timers to deliver accurate preemption points.
...@@ -426,7 +418,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 static void hrtick_rq_init(struct rq *rq)
 {
 #ifdef CONFIG_SMP
-	rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start);
+	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
 #endif
 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 	rq->hrtick_timer.function = hrtick;
...@@ -7188,7 +7180,7 @@ void __init sched_init(void)
 		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
-		rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
+		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
 #endif
 #endif /* CONFIG_SMP */
 	hrtick_rq_init(rq);
......
...@@ -28,7 +28,7 @@
 #include "smpboot.h"
 #include "sched/smp.h"
-#define CSD_TYPE(_csd) ((_csd)->flags & CSD_FLAG_TYPE_MASK)
+#define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 struct call_function_data {
 	call_single_data_t __percpu *csd;
...@@ -131,7 +131,7 @@ static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
 	csd_type = CSD_TYPE(csd);
 	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
-		return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
+		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
 	return -1;
 }
...@@ -147,7 +147,7 @@ static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd
 	bool firsttime;
 	u64 ts2, ts_delta;
 	call_single_data_t *cpu_cur_csd;
-	unsigned int flags = READ_ONCE(csd->flags);
+	unsigned int flags = READ_ONCE(csd->node.u_flags);
 	if (!(flags & CSD_FLAG_LOCK)) {
 		if (!unlikely(*bug_id))
...@@ -225,14 +225,14 @@ static void csd_lock_record(struct __call_single_data *csd)
 static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 {
-	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
+	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 static __always_inline void csd_lock(struct __call_single_data *csd)
 {
 	csd_lock_wait(csd);
-	csd->flags |= CSD_FLAG_LOCK;
+	csd->node.u_flags |= CSD_FLAG_LOCK;
 	/*
 	 * prevent CPU from reordering the above assignment
...@@ -244,12 +244,12 @@ static __always_inline void csd_lock(struct __call_single_data *csd)
 static __always_inline void csd_unlock(struct __call_single_data *csd)
 {
-	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 	/*
 	 * ensure we're all done before releasing data:
 	 */
-	smp_store_release(&csd->flags, 0);
+	smp_store_release(&csd->node.u_flags, 0);
 }
 static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
...@@ -301,7 +301,7 @@ static int generic_exec_single(int cpu, struct __call_single_data *csd)
 		return -ENXIO;
 	}
-	__smp_call_single_queue(cpu, &csd->llist);
+	__smp_call_single_queue(cpu, &csd->node.llist);
 	return 0;
 }
...@@ -354,7 +354,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 * We don't have to use the _safe() variant here
 	 * because we are not invoking the IPI handlers yet.
 	 */
-	llist_for_each_entry(csd, entry, llist) {
+	llist_for_each_entry(csd, entry, node.llist) {
 		switch (CSD_TYPE(csd)) {
 		case CSD_TYPE_ASYNC:
 		case CSD_TYPE_SYNC:
...@@ -379,16 +379,16 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 * First; run all SYNC callbacks, people are waiting for us.
 	 */
 	prev = NULL;
-	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
 		/* Do we wait until *after* callback? */
 		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
 			smp_call_func_t func = csd->func;
 			void *info = csd->info;
 			if (prev) {
-				prev->next = &csd_next->llist;
+				prev->next = &csd_next->node.llist;
 			} else {
-				entry = &csd_next->llist;
+				entry = &csd_next->node.llist;
 			}
 			csd_lock_record(csd);
...@@ -396,7 +396,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 			csd_unlock(csd);
 			csd_lock_record(NULL);
 		} else {
-			prev = &csd->llist;
+			prev = &csd->node.llist;
 		}
 	}
...@@ -407,14 +407,14 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 * Second; run all !SYNC callbacks.
 	 */
 	prev = NULL;
-	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
 		int type = CSD_TYPE(csd);
 		if (type != CSD_TYPE_TTWU) {
 			if (prev) {
-				prev->next = &csd_next->llist;
+				prev->next = &csd_next->node.llist;
 			} else {
-				entry = &csd_next->llist;
+				entry = &csd_next->node.llist;
 			}
 			if (type == CSD_TYPE_ASYNC) {
...@@ -430,7 +430,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 			}
 		} else {
-			prev = &csd->llist;
+			prev = &csd->node.llist;
 		}
 	}
...@@ -469,7 +469,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 {
 	call_single_data_t *csd;
 	call_single_data_t csd_stack = {
-		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
+		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
 	};
 	int this_cpu;
 	int err;
...@@ -506,8 +506,8 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	csd->func = func;
 	csd->info = info;
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-	csd->src = smp_processor_id();
-	csd->dst = cpu;
+	csd->node.src = smp_processor_id();
+	csd->node.dst = cpu;
 #endif
 	err = generic_exec_single(cpu, csd);
...@@ -548,12 +548,12 @@ int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 	preempt_disable();
-	if (csd->flags & CSD_FLAG_LOCK) {
+	if (csd->node.u_flags & CSD_FLAG_LOCK) {
 		err = -EBUSY;
 		goto out;
 	}
-	csd->flags = CSD_FLAG_LOCK;
+	csd->node.u_flags = CSD_FLAG_LOCK;
 	smp_wmb();
 	err = generic_exec_single(cpu, csd);
...@@ -671,14 +671,14 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		csd_lock(csd);
 		if (wait)
-			csd->flags |= CSD_TYPE_SYNC;
+			csd->node.u_flags |= CSD_TYPE_SYNC;
 		csd->func = func;
 		csd->info = info;
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-		csd->src = smp_processor_id();
-		csd->dst = cpu;
+		csd->node.src = smp_processor_id();
+		csd->node.dst = cpu;
 #endif
-		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+		if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
 			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 	}
......
...@@ -11261,8 +11261,7 @@ static int __init net_dev_init(void)
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
-		sd->csd.func = rps_trigger_softirq;
-		sd->csd.info = sd;
+		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
 		sd->cpu = i;
 #endif
......