diff --git a/include/linux/sched.h b/include/linux/sched.h
index 28a98fc4ded4f6c6c397175962f24ac7ab3e3146..32813c345115fb766ce18736ae2774f97920c986 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -997,7 +997,6 @@ struct task_struct {
 	/* Signal handlers: */
 	struct signal_struct		*signal;
 	struct sighand_struct __rcu		*sighand;
-	struct sigqueue			*sigqueue_cache;
 	sigset_t			blocked;
 	sigset_t			real_blocked;
 	/* Restored if set_restore_sigmask() was used: */
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 201f88e3738b2e641370a62ff18a564d6dbdd070..5160fd45e5cab878b2393b10ee7336f8a1a9fea9 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -267,7 +267,6 @@ static inline void init_sigpending(struct sigpending *sig)
 }
 
 extern void flush_sigqueue(struct sigpending *queue);
-extern void exit_task_sigqueue_cache(struct task_struct *tsk);
 
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
diff --git a/kernel/exit.c b/kernel/exit.c
index fd1c04193e18b8d1dd678f7d5c4183881e552b74..65809fac30387519e76ba16d51fbcd51fa5abaf2 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -162,7 +162,6 @@ static void __exit_signal(struct task_struct *tsk)
 		flush_sigqueue(&sig->shared_pending);
 		tty_kref_put(tty);
 	}
-	exit_task_sigqueue_cache(tsk);
 }
 
 static void delayed_put_task_struct(struct rcu_head *rhp)
diff --git a/kernel/fork.c b/kernel/fork.c
index dc06afd725cbde95f22296d5694e73fa92c68283..a070caed5c8edb0f1f05ad7327acf740c06b561e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2008,7 +2008,6 @@ static __latent_entropy struct task_struct *copy_process(
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
-	p->sigqueue_cache = NULL;
 
 	p->utime = p->stime = p->gtime = 0;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
diff --git a/kernel/signal.c b/kernel/signal.c
index f1ecd8f0c11d988db484b04adc60b9d384ea5ac8..30a0bee5ff9bb88e8f18c49db9c06eaff7b8d59a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -431,22 +431,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	rcu_read_unlock();
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		/*
-		 * Preallocation does not hold sighand::siglock so it can't
-		 * use the cache. The lockless caching requires that only
-		 * one consumer and only one producer run at a time.
-		 *
-		 * For the regular allocation case it is sufficient to
-		 * check @q for NULL because this code can only be called
-		 * if the target task @t has not been reaped yet; which
-		 * means this code can never observe the error pointer which is
-		 * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
-		 */
-		q = READ_ONCE(t->sigqueue_cache);
-		if (!q || sigqueue_flags)
-			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
-		else
-			WRITE_ONCE(t->sigqueue_cache, NULL);
+		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
 	} else {
 		print_dropped_signal(sig);
 	}
@@ -463,53 +448,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	return q;
 }
 
-void exit_task_sigqueue_cache(struct task_struct *tsk)
-{
-	/* Race free because @tsk is mopped up */
-	struct sigqueue *q = tsk->sigqueue_cache;
-
-	if (q) {
-		/*
-		 * Hand it back to the cache as the task might
-		 * be self reaping which would leak the object.
-		 */
-		kmem_cache_free(sigqueue_cachep, q);
-	}
-
-	/*
-	 * Set an error pointer to ensure that @tsk will not cache a
-	 * sigqueue when it is reaping it's child tasks
-	 */
-	tsk->sigqueue_cache = ERR_PTR(-1);
-}
-
-static void sigqueue_cache_or_free(struct sigqueue *q)
-{
-	/*
-	 * Cache one sigqueue per task. This pairs with the consumer side
-	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
-	 * compiler from store tearing and to tell KCSAN that the data race
-	 * is intentional when run without holding current->sighand->siglock,
-	 * which is fine as current obviously cannot run __sigqueue_free()
-	 * concurrently.
-	 *
-	 * The NULL check is safe even if current has been reaped already,
-	 * in which case exit_task_sigqueue_cache() wrote an error pointer
-	 * into current->sigqueue_cache.
-	 */
-	if (!READ_ONCE(current->sigqueue_cache))
-		WRITE_ONCE(current->sigqueue_cache, q);
-	else
-		kmem_cache_free(sigqueue_cachep, q);
-}
-
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
 	if (atomic_dec_and_test(&q->user->sigpending))
 		free_uid(q->user);
-	sigqueue_cache_or_free(q);
+	kmem_cache_free(sigqueue_cachep, q);
 }
 
 void flush_sigqueue(struct sigpending *queue)
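
For context, the code removed above implemented a single-slot, lockless per-task sigqueue cache: the consumer side (__sigqueue_alloc() running under sighand::siglock) took the cached entry if one was present, and the producer side (__sigqueue_free() running on current) put one back, with READ_ONCE()/WRITE_ONCE() preventing store tearing. The sketch below is a minimal userspace analogue of that pattern, not kernel code: C11 relaxed atomics stand in for READ_ONCE()/WRITE_ONCE(), malloc()/free() stand in for the kmem_cache, the preallocation bypass (sigqueue_flags) is omitted, and the names slot_cache, slot_alloc and slot_free are made up for illustration.

#include <stdatomic.h>
#include <stdlib.h>

struct sigqueue { int sig; };

/* One cache slot; NULL means "empty". */
static _Atomic(struct sigqueue *) slot_cache;

/*
 * Consumer side: take the cached object if present, else fall back to
 * the allocator. As in the removed kernel code, this is only correct
 * if the caller guarantees that a single consumer and a single
 * producer run at a time (siglock and current provided that there).
 */
static struct sigqueue *slot_alloc(void)
{
	struct sigqueue *q;

	q = atomic_load_explicit(&slot_cache, memory_order_relaxed);
	if (!q)
		return malloc(sizeof(*q));
	atomic_store_explicit(&slot_cache, NULL, memory_order_relaxed);
	return q;
}

/* Producer side: keep one object in the slot, free any surplus. */
static void slot_free(struct sigqueue *q)
{
	if (!atomic_load_explicit(&slot_cache, memory_order_relaxed))
		atomic_store_explicit(&slot_cache, q, memory_order_relaxed);
	else
		free(q);
}

int main(void)
{
	struct sigqueue *a = slot_alloc();	/* slot empty: heap allocation */

	slot_free(a);				/* slot empty: a is cached */
	slot_free(slot_alloc());		/* reuses and re-caches a */
	free(atomic_exchange_explicit(&slot_cache, NULL, memory_order_relaxed));
	return 0;
}

The ERR_PTR(-1) sentinel in the removed exit_task_sigqueue_cache() handled the corner case its comments describe: a task that has already been reaped (its cache slot flushed) can still run the producer side, e.g. while reaping its own children, and the poisoned slot makes the NULL check fail so those late frees go back to the slab rather than into a slot nobody will ever empty again. The revert drops that bookkeeping entirely; __sigqueue_alloc() and __sigqueue_free() now go straight to sigqueue_cachep.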