/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/compat.h>

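/*
 * Maximum random offset that may be applied below the stack top;
 * zero if stack randomization is disabled for this task.
 */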
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

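/*
 * Base of the mmap area for the top-down layout: STACK_TOP minus the
 * (clamped) stack gap and the maximum stack randomization.
 */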
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return STACK_TOP - stack_maxrandom_size() - (gap & PAGE_MASK);
}

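/*
 * Decide whether the current task should use the legacy bottom-up
 * mmap layout instead of the top-down layout.
 */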
static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
	/*
	 * Force standard allocation for 64 bit programs.
	 */
	if (!is_compat_task())
		return 1;
#endif
	return sysctl_legacy_va_layout ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    rlimit(RLIMIT_STACK) == RLIM_INFINITY;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

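/*
 * If the requested length does not fit into the current address space,
 * try to upgrade the page table to 4 levels (2^53 byte limit) before
 * the mapping is set up.
 */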
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

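/*
 * Bottom-up area search; on -ENOMEM the address space is upgraded
 * to 4 page table levels (if possible) and the search is retried.
 */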
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

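/*
 * Top-down area search; on -ENOMEM the address space is upgraded
 * to 4 page table levels (if possible) and the search is retried.
 */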
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif