Commit 5ae9497d authored by Eric W. Biederman

signal: requeuing undeliverable signals

Kyle Huey recently reported[1] that rr gets confused if SIGKILL prevents
ptrace_signal from delivering a signal, as the kernel sets up a signal
frame for a signal that rr did not have a chance to observe with ptrace.
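
For context, a tracer such as rr relies on ptrace signal-delivery stops to
observe every signal before the tracee acts on it, and it chooses which
signal (if any) to inject when it resumes the tracee.  The sketch below is a
minimal userspace illustration of that contract (ordinary ptrace(2) usage,
not kernel code and not rr itself); the problem above is that a pending
SIGKILL could make the kernel build a signal frame for a signal that never
showed up at such a stop.

/* Toy tracer/tracee pair: the tracer sees each signal at a ptrace stop
 * and decides what to inject when continuing the tracee. */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void on_usr1(int sig)
{
        (void)sig;      /* runs only if the tracer chose to inject SIGUSR1 */
}

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                /* Tracee: ask to be traced, then raise a signal. */
                signal(SIGUSR1, on_usr1);
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);         /* rendezvous with the tracer */
                raise(SIGUSR1);         /* the signal we want observed */
                _exit(0);
        }

        /* Tracer: report every stop and decide what to inject on resume. */
        for (;;) {
                int status, sig;

                if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
                        break;
                if (!WIFSTOPPED(status))
                        continue;
                sig = WSTOPSIG(status);
                printf("tracer observed signal %d\n", sig);
                /* Suppress the rendezvous SIGSTOP, forward everything else. */
                ptrace(PTRACE_CONT, child, NULL,
                       (void *)(long)(sig == SIGSTOP ? 0 : sig));
        }
        return 0;
}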

In looking into it I found a couple of bugs and a quality of
implementation issue.

- The test for signal_group_exit should be inside the for loop in get_signal.
- Signals should be requeued on the same queue they were dequeued from
  (see the sketch after this list).
- When a fatal signal is pending ptrace_signal should not return another
  signal for delivery.
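
The second point is the subtle one, so here is a toy userspace model of it
(two small arrays standing in for a thread's private pending queue and the
process-wide shared queue; the kernel's real sigpending structures and
locking are not modeled, and all names below are invented for the
illustration).  Requeueing a dequeued shared signal onto the dequeuing
thread's private queue, as the pre-patch ptrace_signal requeue did by always
using PIDTYPE_PID, strands the signal where the other threads of the group
can never dequeue it; recording the queue type at dequeue time and requeueing
with that type keeps it visible.

#include <stdio.h>

enum queue_type { QUEUE_PRIVATE, QUEUE_SHARED };

struct queue {
        int sig[8];
        int n;
};

static void enqueue(struct queue *q, int sig)
{
        q->sig[q->n++] = sig;
}

/* Dequeue from the private queue first, else the shared one,
 * and report which queue the signal came from. */
static int dequeue(struct queue *private, struct queue *shared,
                   enum queue_type *type)
{
        if (private->n) {
                *type = QUEUE_PRIVATE;
                return private->sig[--private->n];
        }
        if (shared->n) {
                *type = QUEUE_SHARED;
                return shared->sig[--shared->n];
        }
        return 0;
}

static void scenario(int requeue_by_type)
{
        struct queue a_private = { .n = 0 }, b_private = { .n = 0 };
        struct queue shared = { .n = 0 };
        enum queue_type type;
        int sig;

        enqueue(&shared, 10);                   /* a process-wide signal */

        /* Thread A dequeues it but must put it back (e.g. it is blocked). */
        sig = dequeue(&a_private, &shared, &type);
        if (requeue_by_type && type == QUEUE_SHARED)
                enqueue(&shared, sig);          /* fixed: back where it came from */
        else
                enqueue(&a_private, sig);       /* old: always the private queue */

        /* Can thread B still pick the signal up? */
        sig = dequeue(&b_private, &shared, &type);
        printf("%s requeue: thread B dequeues %d\n",
               requeue_by_type ? "typed" : "untyped", sig);
}

int main(void)
{
        scenario(0);    /* prints 0: the shared signal is stranded on A's queue */
        scenario(1);    /* prints 10: the group can still see the signal */
        return 0;
}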

Kyle Huey has verified[2] an earlier version of this change.

I have reworked things one more time to completely fix the issues
raised, and to keep the code maintainable long term.

I have smoke tested this code and, combined with a careful review, I
expect it to work fine.  Kyle, if you can double check that my last
round of changes still works for rr, I would appreciate it.

Eric W. Biederman (3):
      signal: In get_signal test for signal_group_exit every time through the loop
      signal: Requeue signals in the appropriate queue
      signal: Requeue ptrace signals

 fs/signalfd.c                |  5 +++--
 include/linux/sched/signal.h |  7 ++++---
 kernel/signal.c              | 44 ++++++++++++++++++++++++++------------------
 3 files changed, 33 insertions(+), 23 deletions(-)

[1] https://lkml.kernel.org/r/20211101034147.6203-1-khuey@kylehuey.com
[2] https://lkml.kernel.org/r/CAP045ApAX725ZfujaK-jJNkfCo5s+oVFpBvNfPJk+DKY8K7d=Q@mail.gmail.com
Tested-by: Kyle Huey <khuey@kylehuey.com>
Link: https://lkml.kernel.org/r/87bl2kekig.fsf_-_@email.froward.int.ebiederm.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -165,11 +165,12 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
                                 int nonblock)
 {
+        enum pid_type type;
         ssize_t ret;
         DECLARE_WAITQUEUE(wait, current);
 
         spin_lock_irq(&current->sighand->siglock);
-        ret = dequeue_signal(current, &ctx->sigmask, info);
+        ret = dequeue_signal(current, &ctx->sigmask, info, &type);
         switch (ret) {
         case 0:
                 if (!nonblock)
@@ -184,7 +185,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
         add_wait_queue(&current->sighand->signalfd_wqh, &wait);
         for (;;) {
                 set_current_state(TASK_INTERRUPTIBLE);
-                ret = dequeue_signal(current, &ctx->sigmask, info);
+                ret = dequeue_signal(current, &ctx->sigmask, info, &type);
                 if (ret != 0)
                         break;
                 if (signal_pending(current)) {
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -286,17 +286,18 @@ static inline int signal_group_exit(const struct signal_struct *sig)
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
-                          sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+                          kernel_siginfo_t *info, enum pid_type *type);
 
 static inline int kernel_dequeue_signal(void)
 {
         struct task_struct *task = current;
         kernel_siginfo_t __info;
+        enum pid_type __type;
         int ret;
 
         spin_lock_irq(&task->sighand->siglock);
-        ret = dequeue_signal(task, &task->blocked, &__info);
+        ret = dequeue_signal(task, &task->blocked, &__info, &__type);
         spin_unlock_irq(&task->sighand->siglock);
 
         return ret;
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -626,7 +626,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  *
  * All callers have to hold the siglock.
  */
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
+                   kernel_siginfo_t *info, enum pid_type *type)
 {
         bool resched_timer = false;
         int signr;
@@ -634,8 +635,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
         /* We only dequeue private signals from ourselves, we don't let
          * signalfd steal them
          */
+        *type = PIDTYPE_PID;
         signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
         if (!signr) {
+                *type = PIDTYPE_TGID;
                 signr = __dequeue_signal(&tsk->signal->shared_pending,
                                          mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
@@ -2522,7 +2525,7 @@ static void do_freezer_trap(void)
         freezable_schedule();
 }
 
-static int ptrace_signal(int signr, kernel_siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
 {
         /*
          * We do not check sig_kernel_stop(signr) but set this marker
@@ -2562,8 +2565,9 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
         }
 
         /* If the (new) signal is now blocked, requeue it. */
-        if (sigismember(&current->blocked, signr)) {
-                send_signal(signr, info, current, PIDTYPE_PID);
+        if (sigismember(&current->blocked, signr) ||
+            fatal_signal_pending(current)) {
+                send_signal(signr, info, current, type);
                 signr = 0;
         }
@@ -2662,18 +2666,19 @@ bool get_signal(struct ksignal *ksig)
                 goto relock;
         }
 
-        /* Has this task already been marked for death? */
-        if (signal_group_exit(signal)) {
-                ksig->info.si_signo = signr = SIGKILL;
-                sigdelset(&current->pending.signal, SIGKILL);
-                trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
-                                &sighand->action[SIGKILL - 1]);
-                recalc_sigpending();
-                goto fatal;
-        }
-
         for (;;) {
                 struct k_sigaction *ka;
+                enum pid_type type;
+
+                /* Has this task already been marked for death? */
+                if (signal_group_exit(signal)) {
+                        ksig->info.si_signo = signr = SIGKILL;
+                        sigdelset(&current->pending.signal, SIGKILL);
+                        trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+                                &sighand->action[SIGKILL - 1]);
+                        recalc_sigpending();
+                        goto fatal;
+                }
 
                 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
                     do_signal_stop(0))
@@ -2706,16 +2711,18 @@ bool get_signal(struct ksignal *ksig)
                  * so that the instruction pointer in the signal stack
                  * frame points to the faulting instruction.
                  */
+                type = PIDTYPE_PID;
                 signr = dequeue_synchronous_signal(&ksig->info);
                 if (!signr)
-                        signr = dequeue_signal(current, &current->blocked, &ksig->info);
+                        signr = dequeue_signal(current, &current->blocked,
+                                               &ksig->info, &type);
 
                 if (!signr)
                         break; /* will return 0 */
 
                 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
                     !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
-                        signr = ptrace_signal(signr, &ksig->info);
+                        signr = ptrace_signal(signr, &ksig->info, type);
                         if (!signr)
                                 continue;
                 }
@@ -3540,6 +3547,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
         ktime_t *to = NULL, timeout = KTIME_MAX;
         struct task_struct *tsk = current;
         sigset_t mask = *which;
+        enum pid_type type;
         int sig, ret = 0;
 
         if (ts) {
@@ -3556,7 +3564,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
         signotset(&mask);
 
         spin_lock_irq(&tsk->sighand->siglock);
-        sig = dequeue_signal(tsk, &mask, info);
+        sig = dequeue_signal(tsk, &mask, info, &type);
         if (!sig && timeout) {
                 /*
                  * None ready, temporarily unblock those we're interested
@@ -3575,7 +3583,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
                 spin_lock_irq(&tsk->sighand->siglock);
                 __set_task_blocked(tsk, &tsk->real_blocked);
                 sigemptyset(&tsk->real_blocked);
-                sig = dequeue_signal(tsk, &mask, info);
+                sig = dequeue_signal(tsk, &mask, info, &type);
         }
 
         spin_unlock_irq(&tsk->sighand->siglock);