提交 7856dfeb 编写于 作者: Andi Kleen 提交者: Linus Torvalds

[PATCH] x86_64: Fixed guard page handling again in iounmap

Caused oopses again.  Also fix potential mismatch in checking if
change_page_attr was needed.

To do it without races I needed to change mm/vmalloc.c to export a
__remove_vm_area that does not take vmlist lock.

Noticed by Terence Ripperda and based on a patch of his.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 c4d1fcf3
...@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size, ...@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
unsigned long flags) unsigned long flags)
{ {
int err = 0; int err = 0;
if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) { if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long vaddr = (unsigned long) __va(phys_addr); unsigned long vaddr = (unsigned long) __va(phys_addr);
...@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l ...@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
return NULL; return NULL;
} }
if (ioremap_change_attr(phys_addr, size, flags) < 0) { if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
area->flags &= 0xffffff; area->flags &= 0xffffff;
vunmap(addr); vunmap(addr);
return NULL; return NULL;
...@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) ...@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
void iounmap(volatile void __iomem *addr) void iounmap(volatile void __iomem *addr)
{ {
struct vm_struct *p, **pprev; struct vm_struct *p;
if (addr <= high_memory) if (addr <= high_memory)
return; return;
...@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr) ...@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
return; return;
write_lock(&vmlist_lock); write_lock(&vmlist_lock);
for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev) p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr)) if (!p)
break; printk("iounmap: bad address %p\n", addr);
if (!p) { else if (p->flags >> 20)
printk("__iounmap: bad address %p\n", addr); ioremap_change_attr(p->phys_addr, p->size, 0);
goto out_unlock;
}
*pprev = p->next;
unmap_vm_area(p);
if ((p->flags >> 20) &&
p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
/* p->size includes the guard page, but cpa doesn't like that */
change_page_attr_addr((unsigned long)__va(p->phys_addr),
p->size >> PAGE_SHIFT,
PAGE_KERNEL);
global_flush_tlb();
}
out_unlock:
write_unlock(&vmlist_lock); write_unlock(&vmlist_lock);
kfree(p); kfree(p);
} }
...@@ -41,6 +41,7 @@ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); ...@@ -41,6 +41,7 @@ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
extern struct vm_struct *remove_vm_area(void *addr); extern struct vm_struct *remove_vm_area(void *addr);
extern struct vm_struct *__remove_vm_area(void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot, extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages); struct page ***pages);
extern void unmap_vm_area(struct vm_struct *area); extern void unmap_vm_area(struct vm_struct *area);
......
...@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) ...@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END); return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
} }
/** /* Caller must hold vmlist_lock */
* remove_vm_area - find and remove a contiguous kernel virtual area
*
* @addr: base address
*
* Search for the kernel VM area starting at @addr, and remove it.
* This function returns the found VM area, but using it is NOT safe
* on SMP machines.
*/
struct vm_struct *remove_vm_area(void *addr)
{ {
struct vm_struct **p, *tmp; struct vm_struct **p, *tmp;
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) { for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
if (tmp->addr == addr) if (tmp->addr == addr)
goto found; goto found;
} }
write_unlock(&vmlist_lock);
return NULL; return NULL;
found: found:
unmap_vm_area(tmp); unmap_vm_area(tmp);
*p = tmp->next; *p = tmp->next;
write_unlock(&vmlist_lock);
/* /*
* Remove the guard page. * Remove the guard page.
...@@ -281,6 +270,24 @@ struct vm_struct *remove_vm_area(void *addr) ...@@ -281,6 +270,24 @@ struct vm_struct *remove_vm_area(void *addr)
return tmp; return tmp;
} }
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr and unlink it
 * from the vmlist, taking vmlist_lock around the walk.
 * The returned vm_struct is NOT safe to use on SMP machines,
 * except for reading its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *area;

	write_lock(&vmlist_lock);
	area = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);

	return area;
}
void __vunmap(void *addr, int deallocate_pages) void __vunmap(void *addr, int deallocate_pages)
{ {
struct vm_struct *area; struct vm_struct *area;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册