Commit 6932689e authored by Paul E. McKenney

livepatch: Replace synchronize_sched() with synchronize_rcu()

Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can be
replaced by synchronize_rcu().  This commit therefore makes this change,
even though it is but a comment.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Parent 2af3024c
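
For context, a minimal editorial sketch of the pattern the commit message describes; it is not part of this patch, and struct foo, gp, reader() and update() are hypothetical names. The point is that a reader protected only by preempt_disable() is now waited on by synchronize_rcu(), so a separate synchronize_sched() is redundant.

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical example, not from this patch. */
struct foo {
	int val;
};

static struct foo __rcu *gp;		/* hypothetical shared pointer */

static int reader(void)
{
	struct foo *p;
	int val = 0;

	preempt_disable();		/* preempt-disable region, no rcu_read_lock() */
	p = rcu_dereference_sched(gp);
	if (p)
		val = p->val;
	preempt_enable();
	return val;
}

static void update(struct foo *newp)
{
	/* Single updater assumed, hence the trivial lockdep condition. */
	struct foo *oldp = rcu_dereference_protected(gp, 1);

	rcu_assign_pointer(gp, newp);
	/*
	 * After the RCU flavor consolidation, synchronize_rcu() also waits
	 * for preempt-disabled regions such as the one in reader(), so a
	 * separate synchronize_sched() is no longer needed here.
	 */
	synchronize_rcu();
	kfree(oldp);
}
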
kernel/livepatch/patch.c
@@ -61,7 +61,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	ops = container_of(fops, struct klp_ops, fops);
 
 	/*
-	 * A variant of synchronize_sched() is used to allow patching functions
+	 * A variant of synchronize_rcu() is used to allow patching functions
 	 * where RCU is not watching, see klp_synchronize_transition().
 	 */
 	preempt_disable_notrace();
@@ -72,7 +72,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	/*
 	 * func should never be NULL because preemption should be disabled here
 	 * and unregister_ftrace_function() does the equivalent of a
-	 * synchronize_sched() before the func_stack removal.
+	 * synchronize_rcu() before the func_stack removal.
 	 */
 	if (WARN_ON_ONCE(!func))
 		goto unlock;
kernel/livepatch/transition.c
@@ -52,7 +52,7 @@ static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
 /*
  * This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
  * tasks even in userspace and idle.
  */
 static void klp_sync(struct work_struct *work)
@@ -175,7 +175,7 @@ void klp_cancel_transition(void)
 void klp_update_patch_state(struct task_struct *task)
 {
 	/*
-	 * A variant of synchronize_sched() is used to allow patching functions
+	 * A variant of synchronize_rcu() is used to allow patching functions
 	 * where RCU is not watching, see klp_synchronize_transition().
 	 */
 	preempt_disable_notrace();
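
The comments touched by this patch refer to klp_synchronize_transition(). As a rough, paraphrased sketch of that mechanism (based on kernel/livepatch/transition.c of this era, not part of the diff above), the "hard force" is an empty work item scheduled on every CPU:

#include <linux/workqueue.h>

/*
 * Intentionally empty: merely running a work item on a CPU forces that CPU
 * through the scheduler, even if it was idle or executing in userspace.
 */
static void klp_sync(struct work_struct *work)
{
}

static void klp_synchronize_transition(void)
{
	/*
	 * schedule_on_each_cpu() waits for the empty work to complete on
	 * every CPU, which also synchronizes with tasks where RCU is not
	 * watching (idle and userspace).
	 */
	schedule_on_each_cpu(klp_sync);
}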