/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

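/*
 * Use the legacy bottom-up layout if the ADDR_COMPAT_LAYOUT personality
 * is set, if the stack rlimit is unlimited, or if the legacy_va_layout
 * sysctl requests it.
 */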
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

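/*
 * Top-down base: leave room for the stack below TASK_SIZE, with the gap
 * clamped to [MIN_GAP, MAX_GAP], then subtract the randomization offset
 * and page-align the result.
 */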
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

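/*
 * Round addr up to the next cache-colour boundary, then add the colour
 * of the file offset, so the mapping gets the same virtual alias colour
 * as the pages it maps.
 */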
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

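	/*
	 * Hand the search off to the generic vm_unmapped_area() helper;
	 * the alignment mask and offset encode the colouring constraint.
	 */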
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

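/*
 * Called at exec time to select the mmap layout. PF_RANDOMIZE adds up
 * to 16MB (32-bit) or 256MB (64-bit) of randomness to the base address.
 */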
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

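/* Page-aligned random offset applied to the initial heap break */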
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}

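/*
 * Randomize the heap start above mm->brk; if the addition wraps around,
 * keep the original break.
 */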
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

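/*
 * A kernel virtual address is valid if the page frame it maps to is
 * backed by real memory.
 */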
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);