Commit 1b028f78 authored by Dmitry Safonov, committed by Thomas Gleixner

x86/mm: Introduce mmap_compat_base() for 32-bit mmap()

mmap() uses a base address, from which it starts to look for free space
for the allocation.

The base address is stored in mm->mmap_base, which is calculated during
exec(). It depends on the task size, the stack rlimit and ASLR
randomization. The task size and the number of random bits differ
between 64-bit and 32-bit applications.

Because the base address is fixed, an mmap() from a compat (32-bit)
syscall issued by a 64-bit task will return an address which is based on
the 64-bit base address and does not fit into the 32-bit address space
(4GB). The returned pointer is truncated to 32 bits, which results in an
invalid address.
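
As an illustration (not part of the patch; 0x7f1234560000 is an assumed,
typical value derived from a 64-bit mmap_base), truncating such a result
to 32 bits lands at an unrelated place in the compat address space:

  /* Illustration only: any address above 4GB behaves the same way. */
  #include <stdio.h>

  int main(void)
  {
      unsigned long addr64 = 0x7f1234560000UL;     /* what the 64-bit base yields */
      unsigned int  addr32 = (unsigned int)addr64; /* what the 32-bit caller sees */

      printf("64-bit result: %#lx\n", addr64);
      printf("truncated:     %#x\n", addr32);      /* 0x34560000: unrelated address */
      return 0;
  }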

To solve this, store a separate compat address base plus a compat legacy
address base in mm_struct. These bases are calculated at exec() time and
are used later to serve 32-bit compat mmap() calls issued by 64-bit
applications.
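
A minimal userspace sketch of the case this change addresses, assuming an
x86_64 kernel with CONFIG_IA32_EMULATION; the helper name
compat_mmap2_anon() is made up for the example, 192 is the i386
__NR_mmap2 number, and int $0x80 selects the compat syscall table even
from 64-bit code:

  /* Sketch: a 64-bit task issuing the 32-bit (compat) mmap2 syscall.
   * With the separate compat bases the returned address fits below 4GB. */
  #define _GNU_SOURCE
  #include <stdio.h>
  #include <sys/mman.h>

  static unsigned int compat_mmap2_anon(unsigned int len)
  {
      unsigned int ret;

      /* i386 mmap2 args: ebx=addr ecx=len edx=prot esi=flags edi=fd ebp=pgoff.
       * The i386 syscall ABI clobbers only eax; r8-r11 are additionally zeroed
       * by the 64-bit compat entry, hence the clobbers. rsp is moved past the
       * red zone before rbp is saved and loaded with pgoff = 0. */
      asm volatile("subq $128, %%rsp\n\t"
                   "pushq %%rbp\n\t"
                   "xorl %%ebp, %%ebp\n\t"
                   "int $0x80\n\t"
                   "popq %%rbp\n\t"
                   "addq $128, %%rsp"
                   : "=a" (ret)
                   : "0" (192),                        /* i386 __NR_mmap2 */
                     "b" (0),                          /* addr: let the kernel pick */
                     "c" (len),
                     "d" (PROT_READ | PROT_WRITE),
                     "S" (MAP_PRIVATE | MAP_ANONYMOUS),
                     "D" (-1)                          /* fd: none, anonymous mapping */
                   : "r8", "r9", "r10", "r11", "memory", "cc");
      return ret;
  }

  int main(void)
  {
      unsigned int addr = compat_mmap2_anon(4096);

      /* With mmap_compat_base in place this prints an address below 4GB;
       * values above -4095 (as unsigned) would indicate an errno. */
      printf("compat mmap2 returned %#x\n", addr);
      return 0;
  }

Build with gcc -O2 on x86_64; without CONFIG_IA32_EMULATION the int $0x80
simply faults.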

As a consequence of this change, 32-bit applications issuing a 64-bit
syscall (after doing a long jump) will now get a 64-bit mapping. Before
this change, 32-bit applications always got a 32-bit mapping.

[ tglx: Massaged changelog and added a comment ]
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Cc: 0x7f454c46@gmail.com
Cc: linux-mm@kvack.org
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20170306141721.9188-4-dsafonov@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 8f3e474f
@@ -700,6 +700,13 @@ config ARCH_MMAP_RND_COMPAT_BITS
           This value can be changed after boot using the
           /proc/sys/vm/mmap_rnd_compat_bits tunable
 
+config HAVE_ARCH_COMPAT_MMAP_BASES
+        bool
+        help
+          This allows 64bit applications to invoke 32-bit mmap() syscall
+          and vice-versa 32-bit applications to call 64-bit mmap().
+          Required for applications doing different bitness syscalls.
+
 config HAVE_COPY_THREAD_TLS
         bool
         help
@@ -106,6 +106,7 @@ config X86
         select HAVE_ARCH_KMEMCHECK
         select HAVE_ARCH_MMAP_RND_BITS          if MMU
         select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if MMU && COMPAT
+        select HAVE_ARCH_COMPAT_MMAP_BASES      if MMU && COMPAT
         select HAVE_ARCH_SECCOMP_FILTER
         select HAVE_ARCH_TRACEHOOK
         select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -303,6 +303,9 @@ static inline int mmap_is_ia32(void)
                 test_thread_flag(TIF_ADDR32));
 }
 
+extern unsigned long tasksize_32bit(void);
+extern unsigned long tasksize_64bit(void);
+
 #ifdef CONFIG_X86_32
 
 #define __STACK_RND_MASK(is32bit) (0x7ff)
@@ -17,6 +17,8 @@
 #include <linux/uaccess.h>
 #include <linux/elf.h>
 
+#include <asm/elf.h>
+#include <asm/compat.h>
 #include <asm/ia32.h>
 #include <asm/syscalls.h>
 
@@ -98,6 +100,18 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
         return error;
 }
 
+static unsigned long get_mmap_base(int is_legacy)
+{
+        struct mm_struct *mm = current->mm;
+
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+        if (in_compat_syscall())
+                return is_legacy ? mm->mmap_compat_legacy_base
+                                 : mm->mmap_compat_base;
+#endif
+        return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
+}
+
 static void find_start_end(unsigned long flags, unsigned long *begin,
                            unsigned long *end)
 {
@@ -114,10 +128,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                 if (current->flags & PF_RANDOMIZE) {
                         *begin = randomize_page(*begin, 0x02000000);
                 }
-        } else {
-                *begin = current->mm->mmap_legacy_base;
-                *end = TASK_SIZE;
+                return;
         }
+
+        *begin = get_mmap_base(1);
+        *end = in_compat_syscall() ? tasksize_32bit() : tasksize_64bit();
 }
 
 unsigned long
@@ -191,7 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
         info.length = len;
         info.low_limit = PAGE_SIZE;
-        info.high_limit = mm->mmap_base;
+        info.high_limit = get_mmap_base(0);
         info.align_mask = 0;
         info.align_offset = pgoff << PAGE_SHIFT;
         if (filp) {
@@ -36,11 +36,16 @@ struct va_alignment __read_mostly va_align = {
         .flags = -1,
 };
 
-static inline unsigned long tasksize_32bit(void)
+unsigned long tasksize_32bit(void)
 {
         return IA32_PAGE_OFFSET;
 }
 
+unsigned long tasksize_64bit(void)
+{
+        return TASK_SIZE_MAX;
+}
+
 static unsigned long stack_maxrandom_size(unsigned long task_size)
 {
         unsigned long max = 0;
@@ -81,6 +86,8 @@ static unsigned long arch_rnd(unsigned int rndbits)
 
 unsigned long arch_mmap_rnd(void)
 {
+        if (!(current->flags & PF_RANDOMIZE))
+                return 0;
         return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
 }
 
@@ -114,22 +121,36 @@ static unsigned long mmap_legacy_base(unsigned long rnd,
  * This function, called very early during the creation of a new
  * process VM image, sets up which VM layout function to use:
  */
-void arch_pick_mmap_layout(struct mm_struct *mm)
+static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
+                unsigned long random_factor, unsigned long task_size)
 {
-        unsigned long random_factor = 0UL;
+        *legacy_base = mmap_legacy_base(random_factor, task_size);
+        if (mmap_is_legacy())
+                *base = *legacy_base;
+        else
+                *base = mmap_base(random_factor, task_size);
+}
 
-        if (current->flags & PF_RANDOMIZE)
-                random_factor = arch_mmap_rnd();
-
-        mm->mmap_legacy_base = mmap_legacy_base(random_factor, TASK_SIZE);
-
-        if (mmap_is_legacy()) {
-                mm->mmap_base = mm->mmap_legacy_base;
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+        if (mmap_is_legacy())
                 mm->get_unmapped_area = arch_get_unmapped_area;
-        } else {
-                mm->mmap_base = mmap_base(random_factor, TASK_SIZE);
+        else
                 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-        }
+
+        arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
+                        arch_rnd(mmap64_rnd_bits), tasksize_64bit());
+
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+        /*
+         * The mmap syscall mapping base decision depends solely on the
+         * syscall type (64-bit or compat). This applies for 64bit
+         * applications and 32bit applications. The 64bit syscall uses
+         * mmap_base, the compat syscall uses mmap_compat_base.
+         */
+        arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
+                        arch_rnd(mmap32_rnd_bits), tasksize_32bit());
+#endif
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
@@ -367,6 +367,11 @@ struct mm_struct {
 #endif
         unsigned long mmap_base;                /* base of mmap area */
         unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+        /* Base adresses for compatible mmap() */
+        unsigned long mmap_compat_base;
+        unsigned long mmap_compat_legacy_base;
+#endif
         unsigned long task_size;                /* size of task vm space */
         unsigned long highest_vm_end;           /* highest vma end address */
         pgd_t * pgd;