提交 8b8f79b9 编写于 作者: M Marcin Slusarz 提交者: Ingo Molnar

x86, kmmio/mmiotrace: Fix double free of kmmio_fault_pages

After every iounmap mmiotrace has to free kmmio_fault_pages, but
it can't do it directly, so it defers freeing by RCU.

It usually works, but when mmiotraced code calls ioremap-iounmap
multiple times without sleeping between (so RCU won't kick in
and start freeing) it can be given the same virtual address, so
at every iounmap mmiotrace will schedule the same pages for
release. Obviously it will explode on second free.

Fix it by marking kmmio_fault_pages which are scheduled for
release and not adding them second time.
Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Tested-by: Marcin Kocielnicki <koriakin@0x04.net>
Tested-by: Shinpei KATO <shinpei@il.is.s.u-tokyo.ac.jp>
Acked-by: Pekka Paalanen <pq@iki.fi>
Cc: Stuart Bennett <stuart@freedesktop.org>
Cc: Marcin Kocielnicki <koriakin@0x04.net>
Cc: nouveau@lists.freedesktop.org
Cc: <stable@kernel.org>
LKML-Reference: <20100613215654.GA3829@joi.lan>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 7e27d6e7
...@@ -45,6 +45,8 @@ struct kmmio_fault_page { ...@@ -45,6 +45,8 @@ struct kmmio_fault_page {
* Protected by kmmio_lock, when linked into kmmio_page_table. * Protected by kmmio_lock, when linked into kmmio_page_table.
*/ */
int count; int count;
bool scheduled_for_release;
}; };
struct kmmio_delayed_release { struct kmmio_delayed_release {
...@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page, ...@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
BUG_ON(f->count < 0); BUG_ON(f->count < 0);
if (!f->count) { if (!f->count) {
disarm_kmmio_fault_page(f); disarm_kmmio_fault_page(f);
f->release_next = *release_list; if (!f->scheduled_for_release) {
*release_list = f; f->release_next = *release_list;
*release_list = f;
f->scheduled_for_release = true;
}
} }
} }
...@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head) ...@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
prevp = &f->release_next; prevp = &f->release_next;
} else { } else {
*prevp = f->release_next; *prevp = f->release_next;
f->release_next = NULL;
f->scheduled_for_release = false;
} }
f = f->release_next; f = *prevp;
} }
spin_unlock_irqrestore(&kmmio_lock, flags); spin_unlock_irqrestore(&kmmio_lock, flags);
...@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p) ...@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
kmmio_count--; kmmio_count--;
spin_unlock_irqrestore(&kmmio_lock, flags); spin_unlock_irqrestore(&kmmio_lock, flags);
if (!release_list)
return;
drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC); drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
if (!drelease) { if (!drelease) {
pr_crit("leaking kmmio_fault_page objects.\n"); pr_crit("leaking kmmio_fault_page objects.\n");
......
...@@ -90,6 +90,27 @@ static void do_test(unsigned long size) ...@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
iounmap(p); iounmap(p);
} }
/*
 * Tests how mmiotrace behaves in face of multiple ioremap / iounmaps in
 * a short time. We had a bug in deferred freeing procedure which tried
 * to free this region multiple times (ioremap can reuse the same address
 * for many mappings).
 *
 * No return value; a regression manifests as a crash (double free of
 * kmmio_fault_page objects) rather than a reported failure.
 */
static void do_test_bulk_ioremapping(void)
{
	void __iomem *p;
	int i;

	/*
	 * Remap the same physical range repeatedly without sleeping in
	 * between, so RCU has no chance to run the deferred release and
	 * ioremap is likely to hand back the same virtual address each
	 * iteration — the exact condition that triggered the double free.
	 * mmio_address is a module-level test parameter defined elsewhere
	 * in this file.
	 */
	for (i = 0; i < 10; ++i) {
		p = ioremap_nocache(mmio_address, PAGE_SIZE);
		if (p)
			iounmap(p);
	}

	/* Force freeing. If it will crash we will know why. */
	synchronize_rcu();
}
static int __init init(void) static int __init init(void)
{ {
unsigned long size = (read_far) ? (8 << 20) : (16 << 10); unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
...@@ -104,6 +125,7 @@ static int __init init(void) ...@@ -104,6 +125,7 @@ static int __init init(void)
"and writing 16 kB of rubbish in there.\n", "and writing 16 kB of rubbish in there.\n",
size >> 10, mmio_address); size >> 10, mmio_address);
do_test(size); do_test(size);
do_test_bulk_ioremapping();
pr_info("All done.\n"); pr_info("All done.\n");
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册