Commit 547ee84c authored by Benjamin Herrenschmidt, committed by Linus Torvalds

[PATCH] ppc64: Improve mapping of vDSO

This patch reworks the way the ppc64 vDSO is mapped in user memory by the kernel
to make it more robust against possible collisions with executable
segments.  Instead of just whacking a VMA at 1Mb, I now use
get_unmapped_area() with a hint, and I moved the mapping of the vDSO to
after the mapping of the various ELF segments and of the interpreter, so
that conflicts get caught properly (it still has to be before
create_elf_tables since the latter will fill the AT_SYSINFO_EHDR entry with the
proper address).
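
As a rough userspace analogue of what the hint buys us (illustrative only, not code from this patch): mmap() without MAP_FIXED treats its address argument the same way, falling back to another free range when the hint collides with an existing mapping instead of clobbering it.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)0x100000;	/* preferred base, like vdso_base */
	size_t len = 2 * 4096;		/* pretend the vDSO is two pages */

	/* Without MAP_FIXED the address is only advisory: if that range is
	 * already taken, the kernel silently picks a different free area. */
	void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("asked for %p, got %p\n", hint, p);
	munmap(p, len);
	return 0;
}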

While I was at it, I also changed the 32 and 64 bit vDSOs to link at
their "natural" address of 1Mb instead of 0.  This is the address where
they are normally mapped in the absence of a conflict.  By doing so, it should
be possible to properly prelink them once it's been verified to work with glibc.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent: fa89c509
--- a/arch/ppc64/kernel/vdso.c
+++ b/arch/ppc64/kernel/vdso.c
@@ -213,13 +213,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 		vdso_base = VDSO64_MBASE;
 	}
 
+	current->thread.vdso_base = 0;
+
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
 	 */
-	if (vdso_pages == 0) {
-		current->thread.vdso_base = 0;
+	if (vdso_pages == 0)
 		return 0;
-	}
+
 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
 	if (vma == NULL)
 		return -ENOMEM;
@@ -230,12 +231,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
 	memset(vma, 0, sizeof(*vma));
 
 	/*
-	 * pick a base address for the vDSO in process space. We have a default
-	 * base of 1Mb on which we had a random offset up to 1Mb.
-	 * XXX: Add possibility for a program header to specify that location
+	 * pick a base address for the vDSO in process space. We try to put it
+	 * at vdso_base which is the "natural" base for it, but we might fail
+	 * and end up putting it elsewhere.
 	 */
+	vdso_base = get_unmapped_area(NULL, vdso_base,
+				      vdso_pages << PAGE_SHIFT, 0, 0);
+	if (vdso_base & ~PAGE_MASK)
+		return (int)vdso_base;
+
 	current->thread.vdso_base = vdso_base;
-	/* + ((unsigned long)vma & 0x000ff000); */
 
 	vma->vm_mm = mm;
 	vma->vm_start = current->thread.vdso_base;
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -782,14 +782,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 			goto out_free_dentry;
 	}
 
-#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
-	retval = arch_setup_additional_pages(bprm, executable_stack);
-	if (retval < 0) {
-		send_sig(SIGKILL, current, 0);
-		goto out_free_dentry;
-	}
-#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
-
 	current->mm->start_stack = bprm->p;
 
 	/* Now we do a little grungy work by mmaping the ELF image into
@@ -949,6 +941,14 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	set_binfmt(&elf_format);
 
+#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
+	retval = arch_setup_additional_pages(bprm, executable_stack);
+	if (retval < 0) {
+		send_sig(SIGKILL, current, 0);
+		goto out_free_dentry;
+	}
+#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
+
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
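
The net effect of moving this block is that arch_setup_additional_pages() now runs after the ELF segments and the interpreter are mapped, but still before create_elf_tables() records the vDSO's final address in the AT_SYSINFO_EHDR auxiliary-vector entry. As a rough illustration of what that entry carries (not part of this patch, and it assumes glibc's much later getauxval() helper), userspace can read it back like this:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR holds the address create_elf_tables() recorded,
	 * i.e. wherever get_unmapped_area() actually placed the vDSO. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso == 0) {
		puts("no vDSO advertised in the auxiliary vector");
		return 1;
	}
	printf("vDSO mapped at 0x%lx\n", vdso);
	return 0;
}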
--- a/include/asm-ppc64/vdso.h
+++ b/include/asm-ppc64/vdso.h
@@ -4,12 +4,12 @@
 #ifdef __KERNEL__
 
 /* Default link addresses for the vDSOs */
-#define VDSO32_LBASE	0
-#define VDSO64_LBASE	0
+#define VDSO32_LBASE	0x100000
+#define VDSO64_LBASE	0x100000
 
 /* Default map addresses */
-#define VDSO32_MBASE	0x100000
-#define VDSO64_MBASE	0x100000
+#define VDSO32_MBASE	VDSO32_LBASE
+#define VDSO64_MBASE	VDSO64_LBASE
 
 #define VDSO_VERSION_STRING	LINUX_2.6.12