/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>

#include "vextern.h"		/* Just for VMAGIC.  */
#undef VEXTERN

unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

static struct page **vdso_pages;
static unsigned vdso_size;

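/*
 * Sanity check that a vdso variable slot still contains the VMAGIC
 * placeholder before it gets patched to point at the kernel variable.
 */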
static inline void *var_ref(void *p, char *name)
{
	if (*(void **)p != (void *)VMAGIC) {
		printk("VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}

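/*
 * Copy the vdso image into freshly allocated pages and patch the
 * placeholders in it (via VEXTERN) to point at the kernel's variables.
 */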
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

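	/* Map the pages contiguously so the image can be checked and patched. */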
	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

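/*
 * Re-include vextern.h with VEXTERN redefined: each listed symbol's
 * slot in the copied image is replaced by the address of the
 * corresponding __-prefixed kernel variable.
 */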
#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	return 0;

 oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
__initcall(init_vdso_vars);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it is still in the same PTE as
 * the stack top.  This doesn't give many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
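	/*
	 * offset is below PTRS_PER_PTE (512), i.e. at most 2MB worth of
	 * pages on x86-64, so the vdso stays under the same last-level
	 * page table as the stack top: about 9 bits of randomness.
	 */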
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

/*
 * Set up a VMA at program startup for the vsyscall page.
 * Not called for compat tasks.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
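	/*
	 * Turn the randomized hint into an actually free address range;
	 * get_unmapped_area() returns an error value on failure.
	 */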
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

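	/*
	 * Back the VMA with the preallocated vdso pages; VM_ALWAYSDUMP
	 * makes the mapping show up in core dumps.
	 */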
	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

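/* Parse the "vdso=" command line option: "vdso=0" disables the vdso. */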
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);