/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>

#include "vextern.h"		/* Just for VMAGIC.  */
#undef VEXTERN

unsigned int __read_mostly vdso_enabled = 1;

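/* Bounds of the vdso image built into the kernel. */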
extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

static struct page **vdso_pages;
static unsigned vdso_size;

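/*
 * Check one vdso variable slot: every slot starts out holding VMAGIC,
 * so anything else means the kernel and the vdso image are out of sync.
 * In that case, warn and disable the vdso instead of patching blindly.
 */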
static inline void *var_ref(void *p, char *name)
{
	if (*(void **)p != (void *)VMAGIC) {
		printk("VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}

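/*
 * Copy the vdso image into freshly allocated pages and patch every
 * VEXTERN-declared variable slot with the address of its kernel-side
 * counterpart.  Runs once at boot via __initcall.
 */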
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

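/*
 * Redefine VEXTERN so that re-including vextern.h patches each slot in
 * the mapped image with the address of the matching __<name> variable.
 */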
#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	return 0;

 oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
__initcall(init_vdso_vars);

struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized
   offset.  This way there is no hole in the middle of the address space.
   To save memory, make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits. */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
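	/* Round the stack top up to the next PMD boundary. */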
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

/* Set up a VMA at program startup for the vsyscall page.
   Not called for compat tasks. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
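	/*
	 * Pick a randomized address hint near the stack top, then let
	 * get_unmapped_area() find an actually free range around it.
	 */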
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

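	/*
	 * Map the pre-built vdso pages read/exec at the chosen address.
	 * VM_ALWAYSDUMP makes sure the vdso shows up in core dumps.
	 */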
	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

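	/* Success falls through here too, with ret == 0. */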
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

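/* Handle the "vdso=" kernel command line option: vdso=0 disables the vdso. */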
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);