diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 32c4b0e35b37768f7d75fa01194f1f9c3d0baac8..3de7f84b53c25e3cdd5f8d44da252336fb55645a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -73,7 +73,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	start_addr = addr = mm->free_area_cache;
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
 
 full_search:
 	if (do_align)
@@ -90,6 +95,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			 */
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -101,6 +107,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 5aa06001a4bde0537fd22a0eebbdb528bf38ca6b..3b099f32b9487a054db420509ad25591ea0d06eb 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -140,7 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
 
-	start_addr = mm->free_area_cache;
+	if (len > mm->cached_hole_size) {
+		start_addr = mm->free_area_cache;
+	} else {
+		start_addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
 
 full_search:
 	addr = ALIGN(start_addr, HPAGE_SIZE);
@@ -154,6 +159,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 			 */
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				start_addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -162,6 +168,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
 	}
 }
@@ -173,12 +181,17 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev_vma;
 	unsigned long base = mm->mmap_base, addr = addr0;
+	unsigned long largest_hole = mm->cached_hole_size;
 	int first_time = 1;
 
 	/* don't allow allocations above current base */
 	if (mm->free_area_cache > base)
 		mm->free_area_cache = base;
 
+	if (len <= largest_hole) {
+		largest_hole = 0;
+		mm->free_area_cache = base;
+	}
try_again:
 	/* make sure it can fit in the remaining address space */
 	if (mm->free_area_cache < len)
@@ -199,13 +212,21 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		 * vma->vm_start, use it:
 		 */
 		if (addr + len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end)))
+		    (!prev_vma || (addr >= prev_vma->vm_end))) {
 			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		else
+			mm->cached_hole_size = largest_hole;
+			return (mm->free_area_cache = addr);
+		} else {
 			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end)
+			if (mm->free_area_cache == vma->vm_end) {
 				mm->free_area_cache = vma->vm_start;
+				mm->cached_hole_size = largest_hole;
+			}
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + largest_hole < vma->vm_start)
+			largest_hole = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
 		addr = (vma->vm_start - len) & HPAGE_MASK;
@@ -218,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	 */
 	if (first_time) {
 		mm->free_area_cache = base;
+		largest_hole = 0;
 		first_time = 0;
 		goto try_again;
 	}
@@ -228,6 +250,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	 * allocations.
 	 */
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->cached_hole_size = ~0UL;
 	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
 			len, pgoff, flags);
 
@@ -235,6 +258,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	 * Restore the topdown base:
 	 */
 	mm->free_area_cache = base;
+	mm->cached_hole_size = ~0UL;
 
 	return addr;
 }
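The bottom-up change above is the pattern this patch repeats per architecture: remember, alongside free_area_cache, the size of the largest hole the last scan walked past, and restart from TASK_UNMAPPED_BASE only when the request could actually fit into one of those skipped holes. A minimal userspace sketch of that search follows; struct vma, struct mm, find_vma() and UNMAPPED_BASE are illustrative stand-ins for the kernel structures, not kernel API, and alignment, TASK_SIZE limits and error handling are omitted:

#include <stddef.h>

#define UNMAPPED_BASE 0x40000000UL

/* Illustrative stand-in for the kernel's vm_area_struct list. */
struct vma {
	unsigned long start, end;
	struct vma *next;
};

struct mm {
	struct vma *map;		/* sorted by address */
	unsigned long free_area_cache;	/* where the last search left off */
	unsigned long cached_hole_size;	/* largest hole known below the cache */
};

/* First vma ending above addr, as the kernel's find_vma() would return. */
static struct vma *find_vma(struct mm *mm, unsigned long addr)
{
	struct vma *v;

	for (v = mm->map; v; v = v->next)
		if (addr < v->end)
			return v;
	return NULL;
}

/* Bottom-up first fit, mirroring the patched arch_get_unmapped_area(). */
unsigned long get_unmapped_area(struct mm *mm, unsigned long len)
{
	unsigned long addr;
	struct vma *vma;

	/*
	 * If every hole below free_area_cache is known to be smaller
	 * than len, starting at the cache cannot skip a usable hole;
	 * otherwise restart from the bottom and rebuild that knowledge.
	 */
	if (len > mm->cached_hole_size)
		addr = mm->free_area_cache;
	else {
		addr = UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	for (vma = find_vma(mm, addr); ; vma = vma->next) {
		if (!vma || addr + len <= vma->start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* remember the largest hole we walked past */
		if (addr + mm->cached_hole_size < vma->start)
			mm->cached_hole_size = vma->start - addr;
		addr = vma->end;
	}
}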
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index b4ab766f59800bcc66774d6ff70c2accc390f835..fdcfe97c75c1dbc388cec0fe261ce446e82ab3e3 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -292,7 +292,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    && !is_hugepage_only_range(mm, addr,len))
 			return addr;
 	}
-	start_addr = addr = mm->free_area_cache;
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
 
full_search:
 	vma = find_vma(mm, addr);
@@ -316,6 +321,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 		vma = vma->vm_next;
 	}
@@ -323,6 +330,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	/* Make sure we didn't miss any holes */
 	if (start_addr != TASK_UNMAPPED_BASE) {
 		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
 		goto full_search;
 	}
 	return -ENOMEM;
@@ -344,6 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct vm_area_struct *vma, *prev_vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long base = mm->mmap_base, addr = addr0;
+	unsigned long largest_hole = mm->cached_hole_size;
 	int first_time = 1;
 
 	/* requested length too big for entire address space */
@@ -364,6 +373,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
+	if (len <= largest_hole) {
+		largest_hole = 0;
+		mm->free_area_cache = base;
+	}
try_again:
 	/* make sure it can fit in the remaining address space */
 	if (mm->free_area_cache < len)
@@ -392,13 +405,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		 * vma->vm_start, use it:
 		 */
 		if (addr+len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end)))
+		    (!prev_vma || (addr >= prev_vma->vm_end))) {
 			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		else
+			mm->cached_hole_size = largest_hole;
+			return (mm->free_area_cache = addr);
+		} else {
 			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end)
+			if (mm->free_area_cache == vma->vm_end) {
 				mm->free_area_cache = vma->vm_start;
+				mm->cached_hole_size = largest_hole;
+			}
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + largest_hole < vma->vm_start)
+			largest_hole = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
@@ -411,6 +432,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	 */
 	if (first_time) {
 		mm->free_area_cache = base;
+		largest_hole = 0;
 		first_time = 0;
 		goto try_again;
 	}
@@ -421,11 +443,13 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	 * allocations.
 	 */
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->cached_hole_size = ~0UL;
 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 	/*
 	 * Restore the topdown base:
 	 */
 	mm->free_area_cache = base;
+	mm->cached_hole_size = ~0UL;
 
 	return addr;
 }
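The top-down variants (the i386 and ppc64 hugetlb paths above, and generic mm/mmap.c below) keep the running maximum in a local largest_hole and publish it to mm->cached_hole_size only when free_area_cache itself is updated, so an abandoned walk cannot leave a half-built value behind. A hedged sketch of just that loop, reusing the illustrative types from the previous sketch; the first_time retry and the fall-back to the bottom-up search are left out:

/* Top-down walk, mirroring the patched *_get_unmapped_area_topdown(). */
unsigned long get_unmapped_area_topdown(struct mm *mm, unsigned long len,
					unsigned long base)
{
	unsigned long largest_hole = mm->cached_hole_size;
	unsigned long addr;
	struct vma *vma;

	/* a hole below the cache could fit this request: restart from
	 * the top so the walk can find and reuse it */
	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
	if (mm->free_area_cache < len)
		return -1UL;	/* the kernel would retry, then go bottom-up */
	addr = mm->free_area_cache;

	do {
		addr -= len;
		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vma->start) {
			/* publish the rebuilt hole knowledge with the hint */
			mm->cached_hole_size = largest_hole;
			return mm->free_area_cache = addr;
		}
		if (mm->free_area_cache == vma->end) {
			/* pull free_area_cache down to the first hole */
			mm->free_area_cache = vma->start;
			mm->cached_hole_size = largest_hole;
		}
		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->start)
			largest_hole = vma->start - addr;
		addr = vma->start;
	} while (addr > len);

	return -1UL;		/* the kernel would retry, then go bottom-up */
}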
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index df5ac294c37906cd575a9bd482b2478cdfbd4805..917b2f32f260888b2a84b272303edce8d0b5c1b6 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -79,6 +79,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = TASK_UNMAPPED_BASE;
+	}
 	if (flags & MAP_PRIVATE)
 		addr = PAGE_ALIGN(mm->free_area_cache);
 	else
@@ -95,6 +99,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			 */
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -106,6 +111,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		addr = vma->vm_end;
 		if (!(flags & MAP_PRIVATE))
 			addr = COLOUR_ALIGN(addr);
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 0077f02f4b374e34ced8a57e03490a6dd06c2165..5f8c822a2b4a4a3dbcdab26809d94a90679682d8 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -84,6 +84,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			return addr;
 	}
 
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = TASK_UNMAPPED_BASE;
+	}
 	start_addr = addr = mm->free_area_cache;
 
 	task_size -= len;
@@ -103,6 +107,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		if (task_size < addr) {
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -114,6 +119,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		addr = vma->vm_end;
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c
index 1965efc974dc4d1312717c9a948e927a456ae884..c12edf5d97f02a6e3c87e90a8f55d3b8c73618aa 100644
--- a/arch/x86_64/ia32/ia32_aout.c
+++ b/arch/x86_64/ia32/ia32_aout.c
@@ -312,6 +312,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
 	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+	current->mm->cached_hole_size = 0;
 
 	set_mm_counter(current->mm, rss, 0);
 	current->mm->mmap = NULL;
diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c
index d9798dd433fca68d01f37d055df1374d28ff1353..cc7821c68851d5334b01359f8dd892a5266fdf0b 100644
--- a/arch/x86_64/kernel/sys_x86_64.c
+++ b/arch/x86_64/kernel/sys_x86_64.c
@@ -105,6 +105,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
+	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+	    && len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = begin;
+	}
 	addr = mm->free_area_cache;
 	if (addr < begin)
 		addr = begin;
@@ -120,6 +125,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			 */
 			if (start_addr != begin) {
 				start_addr = addr = begin;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -131,6 +137,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		addr = vma->vm_end;
 	}
 }
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 009b8920c1fff5fe796bc08f42d06c1917636c66..dd9baabaf0166afff7a55c5692f48660553b9bfa 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -316,6 +316,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
 	current->mm->free_area_cache = current->mm->mmap_base;
+	current->mm->cached_hole_size = 0;
 
 	set_mm_counter(current->mm, rss, 0);
 	current->mm->mmap = NULL;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f8f6b6b76179da87bfca266adadf360b10f2e91a..7976a238f0a3d60bb4c4a0d1fba0b3ad5183ad08 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -775,6 +775,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	   change some of these later */
 	set_mm_counter(current->mm, rss, 0);
 	current->mm->free_area_cache = current->mm->mmap_base;
+	current->mm->cached_hole_size = 0;
 	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 				 executable_stack);
 	if (retval < 0) {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 2af3338f891bb17d069ec48a0323ecc710cf1ebd..3a9b6d179cbdf492da7924012fd85aa8241a64f1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -122,6 +122,9 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 
 	start_addr = mm->free_area_cache;
 
+	if (len <= mm->cached_hole_size)
+		start_addr = TASK_UNMAPPED_BASE;
+
full_search:
 	addr = ALIGN(start_addr, HPAGE_SIZE);
 
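The include/linux/sched.h hunk below documents the contract both walks maintain: no hole lying entirely below free_area_cache is larger than cached_hole_size, with 0 meaning "no knowledge, restart from the base" and ~0UL meaning "state invalidated, restart and rebuild". Stated as a hypothetical checker over the illustrative types above (not kernel code, and assuming the vma list is sorted and starts at or above UNMAPPED_BASE):

#include <assert.h>

static void check_hole_invariant(struct mm *mm)
{
	unsigned long prev_end = UNMAPPED_BASE;
	struct vma *v;

	/* 0 and ~0UL are sentinels: both force the next search to
	 * restart from the base, so there is nothing to check */
	if (mm->cached_hole_size == 0 || mm->cached_hole_size == ~0UL)
		return;

	/* every hole entirely below the cache is no larger than
	 * cached_hole_size, so "len > cached_hole_size" proves that
	 * starting at free_area_cache cannot skip a usable hole */
	for (v = mm->map; v && v->end <= mm->free_area_cache; v = v->next) {
		assert(v->start - prev_end <= mm->cached_hole_size);
		prev_end = v->end;
	}
}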
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dbb109022f3646ff39b7f64464bebb0200071fc..b58afd97a180274bad8b8904b018aac1c08621ef 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -201,8 +201,8 @@ extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
 			  unsigned long flags);
-extern void arch_unmap_area(struct vm_area_struct *area);
-extern void arch_unmap_area_topdown(struct vm_area_struct *area);
+extern void arch_unmap_area(struct mm_struct *, unsigned long);
+extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
@@ -218,9 +218,10 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	void (*unmap_area) (struct vm_area_struct *area);
-	unsigned long mmap_base;	/* base of mmap area */
-	unsigned long free_area_cache;	/* first hole */
+	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+	unsigned long mmap_base;	/* base of mmap area */
+	unsigned long cached_hole_size;	/* if non-zero, the largest hole below free_area_cache */
+	unsigned long free_area_cache;	/* first hole of size cached_hole_size or larger */
 	pgd_t * pgd;
 	atomic_t mm_users;	/* How many users with user space? */
 	atomic_t mm_count;	/* How many references to "struct mm_struct" (users count as 1) */
diff --git a/kernel/fork.c b/kernel/fork.c
index f42a17f88699bb4c1a07b4f93961c92e3c5d75a5..876b31cd822d2669b5831b31707aabfdba676304 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -194,6 +194,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 	mm->mmap = NULL;
 	mm->mmap_cache = NULL;
 	mm->free_area_cache = oldmm->mmap_base;
+	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
 	set_mm_counter(mm, rss, 0);
 	set_mm_counter(mm, anon_rss, 0);
@@ -322,6 +323,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
 	mm->ioctx_list = NULL;
 	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->cached_hole_size = ~0UL;
 
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index de54acd9942f9929004921042721df5cdfe2b6c7..9da23c1ef9dce6ea1710d2f36d3e0c7ece033876 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1175,7 +1175,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	start_addr = addr = mm->free_area_cache;
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
 
full_search:
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
@@ -1186,7 +1191,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			 * some holes.
 			 */
 			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
+				addr = TASK_UNMAPPED_BASE;
+				start_addr = addr;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -1198,19 +1205,22 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 	}
 }
 #endif
 
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 {
 	/*
 	 * Is this a new hole at the lowest possible address?
 	 */
-	if (area->vm_start >= TASK_UNMAPPED_BASE &&
-			area->vm_start < area->vm_mm->free_area_cache)
-		area->vm_mm->free_area_cache = area->vm_start;
+	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+		mm->free_area_cache = addr;
+		mm->cached_hole_size = ~0UL;
+	}
 }
 
 /*
@@ -1240,6 +1250,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
 	/* either no address requested or can't fit in requested address hole */
 	addr = mm->free_area_cache;
 
@@ -1264,6 +1280,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
 	} while (len < vma->vm_start);
@@ -1274,28 +1294,30 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 	/*
 	 * Restore the topdown base:
 	 */
 	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
 
 	return addr;
 }
 #endif
 
-void arch_unmap_area_topdown(struct vm_area_struct *area)
+void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 {
 	/*
 	 * Is this a new hole at the highest possible address?
 	 */
-	if (area->vm_end > area->vm_mm->free_area_cache)
-		area->vm_mm->free_area_cache = area->vm_end;
+	if (addr > mm->free_area_cache)
+		mm->free_area_cache = addr;
 
 	/* dont allow allocations above current base */
-	if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
-		area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
+	if (mm->free_area_cache > mm->mmap_base)
+		mm->free_area_cache = mm->mmap_base;
 }
 
 unsigned long
@@ -1595,7 +1617,6 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 	if (area->vm_flags & VM_LOCKED)
 		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
 	vm_stat_unaccount(area);
-	area->vm_mm->unmap_area(area);
 	remove_vm_struct(area);
 }
 
@@ -1649,6 +1670,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct vm_area_struct **insertion_point;
 	struct vm_area_struct *tail_vma = NULL;
+	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	do {
@@ -1659,6 +1681,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
 	tail_vma->vm_next = NULL;
+	if (mm->unmap_area == arch_unmap_area)
+		addr = prev ? prev->vm_end : mm->mmap_base;
+	else
+		addr = vma ? vma->vm_start : mm->mmap_base;
+	mm->unmap_area(mm, addr);
 	mm->mmap_cache = NULL;		/* Kill the cache. */
 }
 
diff --git a/mm/nommu.c b/mm/nommu.c
index c53e9c8f6b4adc98ac9fe69b1bc3526bba1baaa5..ce74452c02d945e40e6cdb503499609f24e1cd64 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1067,7 +1067,7 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 {
 }
 
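Taken together with the detach_vmas_to_be_unmapped() hint above, the payoff is that a large request no longer rescans the whole low vma list just because free_area_cache was pulled down by an unmap, while small requests can still restart and reuse low holes. A small usage demo, appended to the bottom-up sketch from the start of the patch (same illustrative types; the sketch does not insert new mappings into the list, which is enough to show which path each request takes):

#include <stdio.h>

int main(void)
{
	/* two mappings with an 8 KB hole between them; the search
	 * state says the largest hole below the cache is 8 KB */
	struct vma high = { UNMAPPED_BASE + 0x3000, UNMAPPED_BASE + 0x10000, NULL };
	struct vma low  = { UNMAPPED_BASE, UNMAPPED_BASE + 0x1000, &high };
	struct mm mm = { &low, UNMAPPED_BASE + 0x10000, 0x2000 };

	/* 16 KB cannot fit in any hole below the cache, so the search
	 * starts at free_area_cache and succeeds without walking the
	 * low vmas: prints 0x40010000 */
	printf("16 KB at %#lx\n", get_unmapped_area(&mm, 0x4000));

	/* 4 KB fits the remembered 8 KB hole, so the search restarts
	 * from the bottom and reuses it instead of growing the top:
	 * prints 0x40001000 */
	printf(" 4 KB at %#lx\n", get_unmapped_area(&mm, 0x1000));
	return 0;
}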