Commit c291ee62 authored by Thomas Gleixner

genirq: Prevent proc race against freeing of irq descriptors

Since the rework of the sparse interrupt code to actually free the
unused interrupt descriptors there exists a race between the /proc
interfaces to the irq subsystem and the code which frees the interrupt
descriptor.

CPU0				CPU1
				show_interrupts()
				  desc = irq_to_desc(X);
free_desc(desc)
  remove_from_radix_tree();
  kfree(desc);
				  raw_spinlock_irq(&desc->lock);

/proc/interrupts is the only interface which can actively corrupt
kernel memory via the lock access. /proc/stat can only read from freed
memory. Extremely hard to trigger, but possible.
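
For illustration, the pre-patch sequence in show_interrupts() boils
down to the following (condensed from the diff below; the output code
is elided). Nothing pins the descriptor between the lookup and the
lock acquisition, so the kfree() on CPU0 can land in that window:

	desc = irq_to_desc(i);		/* lookup, no protection */
	if (!desc)
		return 0;
	/* <-- free_desc() on another CPU may kfree(desc) here */
	raw_spin_lock_irqsave(&desc->lock, flags);	/* potential use after free */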

The interfaces in /proc/irq/N/ are not affected by this because the
removal of the proc file is serialized in procfs against concurrent
readers/writers. The removal happens before the descriptor is freed.

For architectures which have CONFIG_SPARSE_IRQ=n this is a non-issue
as the descriptor is never freed. It's merely cleared out with the irq
descriptor lock held. So any concurrent proc access will either see
the old correct value or the cleared out ones.

Protect the lookup and access to the irq descriptor in
show_interrupts() with the sparse_irq_lock.

Provide kstat_irqs_usr(), which protects the lookup and access with
sparse_irq_lock, and switch /proc/stat to use it.
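
As a usage sketch (the seq_file show routine and the IRQ number below
are made up for illustration; kstat_irqs_usr(), irq_lock_sparse() and
irq_unlock_sparse() are the interfaces added by this patch), a reader
running in preemptible process context simply calls the _usr variant
and leaves the descriptor protection to it:

	/* hypothetical /proc style reader, process context */
	static int example_irq_count_show(struct seq_file *m, void *v)
	{
		unsigned int irq = 10;	/* hypothetical IRQ number */

		/* takes sparse_irq_lock internally, safe against free_desc() */
		seq_printf(m, "irq %u: %u\n", irq, kstat_irqs_usr(irq));
		return 0;
	}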

Document the existing kstat_irqs interfaces so it's clear that the
caller needs to take care of protection. The users of these
interfaces are either not affected due to SPARSE_IRQ=n or already
protected against removal.
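
What "already protected" means in practice: a caller that uses the raw
kstat_irqs() or kstat_irqs_cpu() interfaces must either run with
SPARSE_IRQ=n or hold the sparse-irq lock across the access, as
show_interrupts() does after this patch. A minimal sketch of that
pattern (the local variables are illustrative only):

	unsigned int total = 0;
	int cpu;

	irq_lock_sparse();	/* pin the descriptor against a concurrent free_desc() */
	for_each_online_cpu(cpu)
		total += kstat_irqs_cpu(irq, cpu);
	irq_unlock_sparse();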

Fixes: 1f5a5b87 ("genirq: Implement a sane sparse_irq allocator")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Parent 3a5dc1fa

fs/proc/stat.c
@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
 	/* sum again ? it could be updated? */
 	for_each_irq_nr(j)
-		seq_put_decimal_ull(p, ' ', kstat_irqs(j));
+		seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
 
 	seq_printf(p,
 		"\nctxt %llu\n"

include/linux/kernel_stat.h
@@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
  * Number of interrupts per specific IRQ source, since bootup
  */
 extern unsigned int kstat_irqs(unsigned int irq);
+extern unsigned int kstat_irqs_usr(unsigned int irq);
 
 /*
  * Number of interrupts per cpu, since bootup

kernel/irq/internals.h
@@ -78,8 +78,12 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);

kernel/irq/irqdesc.c
@@ -132,6 +132,16 @@ static void free_masks(struct irq_desc *desc)
 static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
+void irq_lock_sparse(void)
+{
+	mutex_lock(&sparse_irq_lock);
+}
+
+void irq_unlock_sparse(void)
+{
+	mutex_unlock(&sparse_irq_lock);
+}
+
 static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 {
 	struct irq_desc *desc;
@@ -168,6 +178,12 @@ static void free_desc(unsigned int irq)
 
 	unregister_irq_proc(irq, desc);
 
+	/*
+	 * sparse_irq_lock protects also show_interrupts() and
+	 * kstat_irq_usr(). Once we deleted the descriptor from the
+	 * sparse tree we can free it. Access in proc will fail to
+	 * lookup the descriptor.
+	 */
 	mutex_lock(&sparse_irq_lock);
 	delete_irq_desc(irq);
 	mutex_unlock(&sparse_irq_lock);
@@ -574,6 +590,15 @@ void kstat_incr_irq_this_cpu(unsigned int irq)
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 }
 
+/**
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
+ * @irq:	The interrupt number
+ * @cpu:	The cpu number
+ *
+ * Returns the sum of interrupt counts on @cpu since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -582,6 +607,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 		*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
+/**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq:	The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -594,3 +627,22 @@ unsigned int kstat_irqs(unsigned int irq)
 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
 }
+
+/**
+ * kstat_irqs_usr - Get the statistics for an interrupt
+ * @irq:	The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. Contrary to kstat_irqs() this can be called from any
+ * preemptible context. It's protected against concurrent removal of
+ * an interrupt descriptor when sparse irqs are enabled.
+ */
+unsigned int kstat_irqs_usr(unsigned int irq)
+{
+	int sum;
+
+	irq_lock_sparse();
+	sum = kstat_irqs(irq);
+	irq_unlock_sparse();
+	return sum;
+}

kernel/irq/proc.c
@@ -15,6 +15,23 @@
 
 #include "internals.h"
 
+/*
+ * Access rules:
+ *
+ * procfs protects read/write of /proc/irq/N/ files against a
+ * concurrent free of the interrupt descriptor. remove_proc_entry()
+ * immediately prevents new read/writes to happen and waits for
+ * already running read/write functions to complete.
+ *
+ * We remove the proc entries first and then delete the interrupt
+ * descriptor from the radix tree and free it. So it is guaranteed
+ * that irq_to_desc(N) is valid as long as the read/writes are
+ * permitted by procfs.
+ *
+ * The read from /proc/interrupts is a different problem because there
+ * is no protection. So the lookup and the access to irqdesc
+ * information must be protected by sparse_irq_lock.
+ */
 static struct proc_dir_entry *root_irq_dir;
 
 #ifdef CONFIG_SMP
@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 	}
 
+	irq_lock_sparse();
 	desc = irq_to_desc(i);
 	if (!desc)
-		return 0;
+		goto outsparse;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_online_cpu(j)
@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_putc(p, '\n');
 out:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
+outsparse:
+	irq_unlock_sparse();
 	return 0;
 }
 #endif