Commit 9e3bd6c3 authored by Pavel Emelyanov, committed by Linus Torvalds

signals: consolidate send_sigqueue and send_group_sigqueue

Both functions do the same thing after proper locking, but with
different sigpending structs, so move the common code into a helper.

After this we have 4 places that look very similar:

  send_sigqueue:          calls do_send_sigqueue and signal_wakeup
  send_group_sigqueue:    calls do_send_sigqueue and __group_complete_signal
  __group_send_sig_info:  calls send_signal and __group_complete_signal
  specific_send_sig_info: calls send_signal and signal_wakeup

Besides, send_signal performs actions similar to do_send_sigqueue's, and
__group_complete_signal performs actions similar to signal_wakeup's.

It looks like they can be consolidated gracefully.
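Schematically, all four of these paths share the same two-step shape.  The
sketch below is only an illustration (queue_step() and complete_step() are
hypothetical stand-ins, and locking, error paths and the blocked-signal check
are left out):

        /*
         * Illustrative outline only, not kernel code:
         * queue_step() stands for do_send_sigqueue()/send_signal(),
         * complete_step() for signal_wakeup()/__group_complete_signal().
         */
        ret = queue_step(sig, p, pending);      /* queue the signal, or report why not */
        complete_step(sig, p);                  /* wake up / pick a thread to handle it */
        return ret;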

Oleg said:

  Personally, I think this change is very good.  But send_sigqueue() and
  send_group_sigqueue() have a very subtle difference which I was never able
  to understand.

  Let's suppose that sigqueue is already queued, and the signal is ignored
  (the latter means we should re-schedule cpu timer or handle overruns).  In
  that case send_sigqueue() returns 0, but send_group_sigqueue() returns 1.

  I think this is not the problem (in fact, I think this patch makes the
  behaviour more correct), but I hope Thomas can take a look and confirm.
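
For context, the difference comes from the order of the two early-return
checks in the pre-patch code, condensed here from the removed blocks in the
diff below (locking and the signalfd/queueing steps are omitted):

        /* Old send_sigqueue(): the "already queued" check ran first,
         * so a queued-but-ignored SI_TIMER signal left ret == 0. */
        if (unlikely(!list_empty(&q->list))) {
                q->info.si_overrun++;
                goto out;               /* ret stays 0 */
        }
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }

        /* Old send_group_sigqueue(): the ignored check ran first,
         * so the same case returned 1 and never counted the overrun. */
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }
        if (unlikely(!list_empty(&q->list))) {
                q->info.si_overrun++;
                goto out;
        }

do_send_sigqueue() keeps the first ordering, so after this patch both callers
count the overrun and return 0 in that case.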
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent c5363d03
@@ -1290,10 +1290,33 @@ void sigqueue_free(struct sigqueue *q)
         __sigqueue_free(q);
 }
 
+static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
+                                struct sigpending *pending)
+{
+        if (unlikely(!list_empty(&q->list))) {
+                /*
+                 * If an SI_TIMER entry is already queue just increment
+                 * the overrun count.
+                 */
+                BUG_ON(q->info.si_code != SI_TIMER);
+                q->info.si_overrun++;
+                return 0;
+        }
+
+        if (sig_ignored(t, sig))
+                return 1;
+
+        signalfd_notify(t, sig);
+        list_add_tail(&q->list, &pending->list);
+        sigaddset(&pending->signal, sig);
+
+        return 0;
+}
+
 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
         unsigned long flags;
-        int ret = 0;
+        int ret = -1;
 
         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1307,37 +1330,14 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
          */
         rcu_read_lock();
 
-        if (!likely(lock_task_sighand(p, &flags))) {
-                ret = -1;
+        if (!likely(lock_task_sighand(p, &flags)))
                 goto out_err;
-        }
-
-        if (unlikely(!list_empty(&q->list))) {
-                /*
-                 * If an SI_TIMER entry is already queue just increment
-                 * the overrun count.
-                 */
-                BUG_ON(q->info.si_code != SI_TIMER);
-                q->info.si_overrun++;
-                goto out;
-        }
-        /* Short-circuit ignored signals. */
-        if (sig_ignored(p, sig)) {
-                ret = 1;
-                goto out;
-        }
-        /*
-         * Deliver the signal to listening signalfds. This must be called
-         * with the sighand lock held.
-         */
-        signalfd_notify(p, sig);
 
-        list_add_tail(&q->list, &p->pending.list);
-        sigaddset(&p->pending.signal, sig);
+        ret = do_send_sigqueue(sig, q, p, &p->pending);
 
         if (!sigismember(&p->blocked, sig))
                 signal_wake_up(p, sig == SIGKILL);
-out:
+
         unlock_task_sighand(p, &flags);
 out_err:
         rcu_read_unlock();
@@ -1349,7 +1349,7 @@ int
 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
         unsigned long flags;
-        int ret = 0;
+        int ret;
 
         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1358,38 +1358,10 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
         spin_lock_irqsave(&p->sighand->siglock, flags);
         handle_stop_signal(sig, p);
 
-        /* Short-circuit ignored signals. */
-        if (sig_ignored(p, sig)) {
-                ret = 1;
-                goto out;
-        }
-
-        if (unlikely(!list_empty(&q->list))) {
-                /*
-                 * If an SI_TIMER entry is already queue just increment
-                 * the overrun count.  Other uses should not try to
-                 * send the signal multiple times.
-                 */
-                BUG_ON(q->info.si_code != SI_TIMER);
-                q->info.si_overrun++;
-                goto out;
-        }
-        /*
-         * Deliver the signal to listening signalfds. This must be called
-         * with the sighand lock held.
-         */
-        signalfd_notify(p, sig);
-
-        /*
-         * Put this signal on the shared-pending queue.
-         * We always use the shared queue for process-wide signals,
-         * to avoid several races.
-         */
-        list_add_tail(&q->list, &p->signal->shared_pending.list);
-        sigaddset(&p->signal->shared_pending.signal, sig);
-
+        ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);
+
         __group_complete_signal(sig, p);
-out:
+
         spin_unlock_irqrestore(&p->sighand->siglock, flags);
         read_unlock(&tasklist_lock);
         return ret;