Commit 84929801 authored by Suresh Siddha, committed by Linus Torvalds

[PATCH] x86_64: TASK_SIZE fixes for compatibility mode processes

Appended patch will set up the compatibility mode TASK_SIZE properly.  This
will fix at least three known bugs that can be encountered while running
compatibility mode apps.

a) A malicious 32bit app can have an elf section at 0xffffe000.  During
   exec of this app, we will have a memory leak, as the return value of
   insert_vm_struct() is not checked in syscall32_setup_pages() and the
   vma allocated for the vsyscall page is thus never freed.  And instead
   of the exec failing (as it has addresses > TASK_SIZE), we were
   previously allowing it to succeed.

b) With a 32bit app, hugetlb_get_unmapped_area()/arch_get_unmapped_area()
   may return addresses beyond 32 bits, ultimately causing corruption
   because of wrap-around and resulting in a SEGFAULT instead of returning
   ENOMEM (see the illustrative program after the mmap example below).

c) A 32bit app doing the mmap below will now fail.

  mmap((void *)(0xFFFFE000UL), 0x10000UL, PROT_READ|PROT_WRITE,
	MAP_FIXED|MAP_PRIVATE|MAP_ANON, 0, 0);
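
To make the wrap-around in (b) concrete, here is a small illustrative
userspace program (not part of this patch; the address value is made up)
showing what a 32bit app ends up with if the kernel hands back a mapping
above 4GB:

  /* Illustrative only -- not from the kernel tree.  An address that is
   * legal under the old 47bit TASK_SIZE no longer fits in 32 bits, so a
   * 32bit app silently wraps it and later faults or corrupts memory. */
  #include <stdio.h>

  int main(void)
  {
          unsigned long kernel_ret = 0x123456780000UL;          /* made-up mmap result above 4GB */
          unsigned int compat_view = (unsigned int)kernel_ret;  /* what a 32bit pointer keeps */

          printf("kernel returned %#lx, 32bit app sees %#x\n",
                 kernel_ret, compat_view);
          return 0;
  }
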
Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 589777ea
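
Before the diff, a standalone sketch of the limit selection the patch
introduces: a fixed TASK_SIZE64 for 64bit tasks plus a per-task
TASK_SIZE/TASK_SIZE_OF() that drops to the IA32 limit for TIF_IA32 tasks.
The stub task struct and the plain-C harness below are illustrative
stand-ins, not kernel code; only the constants and the ternary mirror the
TASK_SIZE definitions in the last hunk of the diff.

  #include <stdio.h>

  /* Mirrors the new scheme: 47bit limit for 64bit tasks, ~4GB for compat. */
  #define TASK_SIZE64      (0x800000000000UL - 4096)  /* 47bits minus one guard page */
  #define IA32_PAGE_OFFSET 0xFFFFe000UL               /* compat limit (ADDR_LIMIT_3GB case omitted) */

  struct task { int is_ia32; };                       /* stand-in for the TIF_IA32 thread flag */

  #define TASK_SIZE_OF(t)  ((t)->is_ia32 ? IA32_PAGE_OFFSET : TASK_SIZE64)

  int main(void)
  {
          struct task compat = { 1 }, native = { 0 };

          printf("compat limit: %#lx\n", TASK_SIZE_OF(&compat)); /* 0xffffe000 */
          printf("native limit: %#lx\n", TASK_SIZE_OF(&native)); /* 0x7ffffffff000 */
          return 0;
  }
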
@@ -46,7 +46,7 @@ struct elf_phdr;
#define IA32_EMULATOR 1
-#define ELF_ET_DYN_BASE (TASK_UNMAPPED_32 + 0x1000000)
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
#undef ELF_ARCH
#define ELF_ARCH EM_386
@@ -307,9 +307,6 @@ MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
#define elf_addr_t __u32
-#undef TASK_SIZE
-#define TASK_SIZE 0xffffffff
static void elf32_init(struct pt_regs *);
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
......
@@ -656,7 +656,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
switch (code) {
case ARCH_SET_GS:
-if (addr >= TASK_SIZE)
+if (addr >= TASK_SIZE_OF(task))
return -EPERM;
cpu = get_cpu();
/* handle small bases via the GDT because that's faster to
@@ -682,7 +682,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
case ARCH_SET_FS:
/* Not strictly needed for fs, but do it for symmetry
with gs */
-if (addr >= TASK_SIZE)
+if (addr >= TASK_SIZE_OF(task))
return -EPERM;
cpu = get_cpu();
/* handle small bases via the GDT because that's faster to
......
@@ -257,12 +257,12 @@ static int putreg(struct task_struct *child,
value &= 0xffff;
return 0;
case offsetof(struct user_regs_struct,fs_base):
-if (value >= TASK_SIZE)
+if (value >= TASK_SIZE_OF(child))
return -EIO;
child->thread.fs = value;
return 0;
case offsetof(struct user_regs_struct,gs_base):
-if (value >= TASK_SIZE)
+if (value >= TASK_SIZE_OF(child))
return -EIO;
child->thread.gs = value;
return 0;
@@ -279,7 +279,7 @@ static int putreg(struct task_struct *child,
break;
case offsetof(struct user_regs_struct, rip):
/* Check if the new RIP address is canonical */
-if (value >= TASK_SIZE)
+if (value >= TASK_SIZE_OF(child))
return -EIO;
break;
}
@@ -419,6 +419,8 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+{
+int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
ret = -EIO;
if ((addr & 7) ||
addr > sizeof(struct user) - 7)
@@ -430,22 +432,22 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
break;
/* Disallows to set a breakpoint into the vsyscall */
case offsetof(struct user, u_debugreg[0]):
-if (data >= TASK_SIZE-7) break;
+if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg0 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[1]):
-if (data >= TASK_SIZE-7) break;
+if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg1 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[2]):
-if (data >= TASK_SIZE-7) break;
+if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg2 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[3]):
-if (data >= TASK_SIZE-7) break;
+if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg3 = data;
ret = 0;
break;
@@ -469,6 +471,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
break;
}
break;
+}
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: /* restart after signal. */
......
@@ -68,13 +68,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long pr
static void find_start_end(unsigned long flags, unsigned long *begin,
unsigned long *end)
{
-#ifdef CONFIG_IA32_EMULATION
-if (test_thread_flag(TIF_IA32)) {
-*begin = TASK_UNMAPPED_32;
-*end = IA32_PAGE_OFFSET;
-} else
-#endif
-if (flags & MAP_32BIT) {
+if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
/* This is usually used needed to map code in small
model, so it needs to be in the first 31bit. Limit
it to that. This means we need to move the
@@ -84,10 +78,10 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
of playground for now. -AK */
*begin = 0x40000000;
*end = 0x80000000;
-} else {
-*begin = TASK_UNMAPPED_64;
+} else {
+*begin = TASK_UNMAPPED_BASE;
*end = TASK_SIZE;
}
}
}
unsigned long
......
@@ -350,7 +350,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
* (error_code & 4) == 0, and that the fault was not a
* protection error (error_code & 1) == 0.
*/
-if (unlikely(address >= TASK_SIZE)) {
+if (unlikely(address >= TASK_SIZE64)) {
if (!(error_code & 5) &&
((address >= VMALLOC_START && address < VMALLOC_END) ||
(address >= MODULES_VADDR && address < MODULES_END))) {
......
@@ -21,7 +21,7 @@ struct exec
#ifdef __KERNEL__
#include <linux/thread_info.h>
-#define STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
+#define STACK_TOP TASK_SIZE
#endif
#endif /* __A_OUT_GNU_H__ */
@@ -160,16 +160,17 @@ static inline void clear_in_cr4 (unsigned long mask)
/*
* User space process size. 47bits minus one guard page.
*/
-#define TASK_SIZE (0x800000000000UL - 4096)
+#define TASK_SIZE64 (0x800000000000UL - 4096)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
-#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
-#define TASK_UNMAPPED_BASE \
-(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
/*
* Size of io_bitmap.
......