Commit ab4720ec authored by Dipankar Sarma, committed by Linus Torvalds

[PATCH] add rcu_barrier() synchronization point

This introduces a new interface - rcu_barrier() - which waits until all
RCU callbacks queued before this call have completed.

Reiser4 needs this because we do more than just free a memory object
in our RCU callback: we also remove it from the list hanging off the
super-block.  This means that before freeing the reiser4-specific
portion of the super-block (during umount) we have to wait until all
pending RCU callbacks have executed.
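
A minimal sketch of that pattern follows; the names my_sb_info, my_obj,
my_obj_free_rcu and my_free_sb_info are hypothetical illustrations, not
reiser4 code, and locking around the list is elided for brevity:

	struct my_sb_info {
		struct list_head objs;	/* list hanging off the super-block */
	};

	struct my_obj {
		struct list_head link;
		struct rcu_head rcu;
	};

	static void my_obj_free_rcu(struct rcu_head *head)
	{
		struct my_obj *obj = container_of(head, struct my_obj, rcu);

		list_del(&obj->link);	/* more than just freeing memory */
		kfree(obj);
	}

	/* objects are retired elsewhere with call_rcu(&obj->rcu, my_obj_free_rcu) */

	static void my_free_sb_info(struct my_sb_info *info)
	{
		/*
		 * synchronize_rcu() only waits for a grace period to elapse;
		 * queued callbacks may still be pending on some CPU.
		 * rcu_barrier() waits until they have all actually run.
		 */
		rcu_barrier();
		kfree(info);
	}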

The only change reiser4 made to the original patch is the export of
rcu_barrier().

Cc: Hans Reiser <reiser@namesys.com>
Cc: Vladimir V. Saveliev <vs@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 85b87242
@@ -100,6 +100,7 @@ struct rcu_data {
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	int cpu;
+	struct rcu_head barrier;
 };
 
 DECLARE_PER_CPU(struct rcu_data, rcu_data);
@@ -285,6 +286,7 @@ extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
 extern __deprecated_for_modules void synchronize_kernel(void);
 extern void synchronize_rcu(void);
 void synchronize_idle(void);
+extern void rcu_barrier(void);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
@@ -116,6 +116,10 @@ void fastcall call_rcu(struct rcu_head *head,
 	local_irq_restore(flags);
 }
 
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -162,6 +166,42 @@ long rcu_batches_completed(void)
 	return rcu_ctrlblk.completed;
 }
 
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *notused)
+{
+	int cpu = smp_processor_id();
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	struct rcu_head *head;
+
+	head = &rdp->barrier;
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all the in-flight RCUs are complete.
+ */
+void rcu_barrier(void)
+{
+	BUG_ON(in_interrupt());
+	/* Take cpucontrol semaphore to protect against CPU hotplug */
+	down(&rcu_barrier_sema);
+	init_completion(&rcu_barrier_completion);
+	atomic_set(&rcu_barrier_cpu_count, 0);
+	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	wait_for_completion(&rcu_barrier_completion);
+	up(&rcu_barrier_sema);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Invoke the completed RCU callbacks. They are expected to be in
  * a per-cpu list.
@@ -457,6 +497,7 @@ static struct notifier_block __devinitdata rcu_nb = {
  */
 void __init rcu_init(void)
 {
+	sema_init(&rcu_barrier_sema, 1);
 	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	/* Register notifier for non-boot CPUs */
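
The mechanism above posts one callback per CPU from rcu_barrier_func(),
counts them in rcu_barrier_cpu_count, and blocks on
rcu_barrier_completion until the last one fires; because per-CPU RCU
callbacks run in the order they were queued, every callback queued
before rcu_barrier() was called has completed by then.  A typical
caller follows this sketch (my_cache and my_exit are hypothetical
names; the type matches the 2.6-era slab API):

	static kmem_cache_t *my_cache;	/* RCU callbacks kmem_cache_free() into this */

	static void __exit my_exit(void)
	{
		/*
		 * Make sure every callback queued with call_rcu() has run
		 * before the cache it frees into is destroyed.
		 */
		rcu_barrier();
		kmem_cache_destroy(my_cache);
	}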