Commit 98d1e64f authored by Michel Lespinasse, committed by Linus Torvalds

mm: remove free_area_cache

Since all architectures have been converted to use vm_unmapped_area(),
there is no remaining use for the free_area_cache.
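
For context, the replacement keeps no per-mm scan cursor at all:
vm_unmapped_area() walks the VMA rbtree, which records the largest free
gap below each subtree, and returns a suitable hole directly. Below is
an illustrative sketch only, not code from this commit; the helper name
is made up, and the limit values mirror the generic top-down allocator,
so individual architectures may differ.

/*
 * Hypothetical example: asking vm_unmapped_area() for a top-down gap
 * instead of scanning from mm->free_area_cache.
 */
static unsigned long example_topdown_gap(struct mm_struct *mm, unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;  /* search downwards from high_limit */
        info.length = len;                      /* size of the hole we need */
        info.low_limit = PAGE_SIZE;             /* keep page zero unmapped */
        info.high_limit = mm->mmap_base;        /* stay below the mmap base */
        info.align_mask = 0;                    /* no extra alignment constraint */
        info.align_offset = 0;
        return vm_unmapped_area(&info);         /* O(log n) walk of the VMA rbtree */
}
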
Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 61b0d760
@@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
...
@@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
 EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
...
@@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
...
@@ -92,10 +92,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
@@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
@@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = s390_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
...
@@ -290,7 +290,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	    sysctl_legacy_va_layout) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		/* We know it's 32-bit */
 		unsigned long task_size = STACK_TOP32;
@@ -302,7 +301,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
...
@@ -66,10 +66,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(mm);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
@@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
 		(current->mm->start_data = N_DATADDR(ex));
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
-	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
-	current->mm->cached_hole_size = 0;
 
 	retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
 	if (retval < 0) {
...
@@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mmap_legacy_base();
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
@@ -255,8 +255,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
 		(current->mm->start_data = N_DATADDR(ex));
 	current->mm->brk = ex.a_bss +
 		(current->mm->start_brk = N_BSSADDR(ex));
-	current->mm->free_area_cache = current->mm->mmap_base;
-	current->mm->cached_hole_size = 0;
 
 	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
 	if (retval < 0) {
...
@@ -738,8 +738,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
 
 	/* Do this so that we can load the interpreter, if need be.  We will
 	   change some of these later */
-	current->mm->free_area_cache = current->mm->mmap_base;
-	current->mm->cached_hole_size = 0;
 	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 				 executable_stack);
 	if (retval < 0) {
...
@@ -330,12 +330,9 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 #endif
 	unsigned long mmap_base;		/* base of mmap area */
 	unsigned long task_size;		/* size of task vm space */
-	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
-	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
 	unsigned long highest_vm_end;		/* highest vma end address */
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
...
@@ -322,8 +322,6 @@ extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
 			  unsigned long flags);
-extern void arch_unmap_area(struct mm_struct *, unsigned long);
-extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #else
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
...
@@ -365,8 +365,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	mm->locked_vm = 0;
 	mm->mmap = NULL;
 	mm->mmap_cache = NULL;
-	mm->free_area_cache = oldmm->mmap_base;
-	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
 	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
@@ -540,8 +538,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	mm->nr_ptes = 0;
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
...
@@ -1878,15 +1878,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 #endif
 
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
-	/*
-	 * Is this a new hole at the lowest possible address?
-	 */
-	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
-		mm->free_area_cache = addr;
-}
-
 /*
  * This mmap-allocator allocates new areas top-down from below the
  * stack's low limit (the base):
@@ -1943,19 +1934,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 }
 #endif
 
-void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
-{
-	/*
-	 * Is this a new hole at the highest possible address?
-	 */
-	if (addr > mm->free_area_cache)
-		mm->free_area_cache = addr;
-
-	/* dont allow allocations above current base */
-	if (mm->free_area_cache > mm->mmap_base)
-		mm->free_area_cache = mm->mmap_base;
-}
-
 unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
@@ -2376,7 +2354,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct vm_area_struct **insertion_point;
 	struct vm_area_struct *tail_vma = NULL;
-	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	vma->vm_prev = NULL;
@@ -2393,11 +2370,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else
 		mm->highest_vm_end = prev ? prev->vm_end : 0;
 	tail_vma->vm_next = NULL;
-	if (mm->unmap_area == arch_unmap_area)
-		addr = prev ? prev->vm_end : mm->mmap_base;
-	else
-		addr = vma ? vma->vm_start : mm->mmap_base;
-	mm->unmap_area(mm, addr);
 	mm->mmap_cache = NULL;		/* Kill the cache. */
 }
...
@@ -1871,10 +1871,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
-}
-
 void unmap_mapping_range(struct address_space *mapping,
 			 loff_t const holebegin, loff_t const holelen,
 			 int even_cows)
...
@@ -295,7 +295,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
 	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
 }
 #endif
...