/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

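/* Populate the image's page array and patch in instruction alternatives. */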
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
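
	/*
	 * Illustration (hypothetical numbers): with start = 0x7ffffffde000
	 * and len spanning three pages, end becomes the PMD-rounded ceiling
	 * minus len, and offset selects one of the
	 * ((end - start) >> PAGE_SHIFT) + 1 page slots at random.
	 */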

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

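	/*
	 * image->sym_vvar_start is negative: the vvar area occupies the
	 * bottom of the range we just reserved, and the vdso text starts
	 * above it.
	 */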
	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

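	/* Map the shared kernel data page (the vvar page) read-only for userspace. */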
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
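	/*
	 * Map the HPET MMIO page uncached and read-only so the vdso can read
	 * the counter without a syscall.
	 */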
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			text_start + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

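	/*
	 * Record where SYSEXIT should land in userspace: the vdso's
	 * sysenter return landing pad for this image.
	 */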
	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
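/* e.g. booting with "vdso=0" disables the 64-bit vDSO. */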
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */
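	/*
	 * Example: cpu 1 on node 2 encodes as limit0 = 0x2001, limit = 0;
	 * userspace recovers both fields with a single LSL on this segment.
	 */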

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */