/*
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  The first vsyscall (slot 0) is located at -10Mbyte, the second at
 *  virtual address -10Mbyte+1024bytes, etc. There are at most four
 *  vsyscalls. One vsyscall can reserve more than one slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *  If we want more than four we need a vDSO.
 *
 *  Note: the concept clashes with User Mode Linux. If you use UML and
 *  want per-guest time, just set the kernel.vsyscall64 sysctl to 0.
 */
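
/*
 * Illustration (not part of the original file): userspace reaches these
 * entry points by calling their fixed addresses directly.  A minimal
 * sketch of a caller, assuming the historical layout described above
 * (slot 0 at -10Mbyte, i.e. 0xffffffffff600000, 1024-byte slots):
 *
 *	#define VSYSCALL_BASE	0xffffffffff600000UL
 *	typedef int (*vgtod_fn)(struct timeval *, struct timezone *);
 *
 *	struct timeval tv;
 *	vgtod_fn vgtod = (vgtod_fn)(VSYSCALL_BASE + 0 * 1024);
 *	vgtod(&tv, NULL);	// slot 0: vgettimeofday
 */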

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>

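/*
 * Each vsyscall is emitted into its own .vsyscall_<nr> section; the
 * kernel linker script places those sections 1024 bytes apart inside
 * the vsyscall page, which is what gives every entry point the fixed
 * user-visible address described at the top of this file.
 */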
#define __vsyscall(nr) \
		__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
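/*
 * The syscall instruction clobbers %rcx (return RIP) and %r11 (saved
 * RFLAGS), and the kernel may touch memory, hence this clobber list
 * for the inline asm fallback below.
 */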
#define __syscall_clobber "r11","cx","memory"

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};

void update_vsyscall_tz(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* sys_tz has changed */
	vsyscall_gtod_data.sys_tz = sys_tz;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

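/*
 * Called by the timekeeping core whenever the clocksource or the wall
 * time changes: publish a fresh snapshot for the vsyscall readers.
 * The seqlock write side forces concurrent readers to retry.
 */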
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vread = clock->vread;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = mult;
	vsyscall_gtod_data.clock.shift = clock->shift;
	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
	vsyscall_gtod_data.wall_to_monotonic = *wtm;
	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

/* RED-PEN: may want to re-add seqlocking, but then the variable should be
 * write-once.
 */
static __always_inline void do_get_tz(struct timezone *tz)
{
	*tz = VVAR(vsyscall_gtod_data).sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
		: __syscall_clobber);
	return ret;
}

static __always_inline void do_vgettimeofday(struct timeval *tv)
{
	cycle_t now, base, mask, cycle_delta;
	unsigned seq;
	unsigned long mult, shift, nsec;
	cycle_t (*vread)(void);
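	/*
	 * Seqlock read loop: snapshot the gtod data and retry if an
	 * update_vsyscall() writer changed it underneath us.
	 */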
	do {
		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);

		vread = VVAR(vsyscall_gtod_data).clock.vread;
		if (unlikely(!vread)) {
			gettimeofday(tv, NULL);
			return;
		}

		now = vread();
		base = VVAR(vsyscall_gtod_data).clock.cycle_last;
		mask = VVAR(vsyscall_gtod_data).clock.mask;
		mult = VVAR(vsyscall_gtod_data).clock.mult;
		shift = VVAR(vsyscall_gtod_data).clock.shift;

		tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
		nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));

	/* calculate interval: */
	cycle_delta = (now - base) & mask;
	/* convert to nsecs: */
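	/* ns = cycles * mult / 2^shift, the clocksource's fixed-point scale */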
	nsec += (cycle_delta * mult) >> shift;

	while (nsec >= NSEC_PER_SEC) {
		tv->tv_sec += 1;
		nsec -= NSEC_PER_SEC;
	}
	tv->tv_usec = nsec / NSEC_PER_USEC;
}

int __vsyscall(0) vgettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely. */
time_t __vsyscall(1) vtime(time_t *t)
{
	unsigned seq;
	time_t result;

	do {
		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);

		result = VVAR(vsyscall_gtod_data).wall_time_sec;

	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));

	if (t)
		*t = result;
	return result;
}

/* Fast way to get the current CPU and node.
   This helps to do per-node and per-CPU caches in user space.
   The result is not guaranteed without CPU affinity, but it usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to an array of two longs.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int p;
	unsigned long j = 0;

	/* Fast cache - only recompute the value once per jiffy and avoid
	   the relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it, pass NULL. */
	if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
		p = tcache->blob[1];
	} else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		native_read_tscp(&p);
	} else {
		/* Load per CPU data from GDT */
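		/*
		 * LSL loads the segment limit of the per-cpu GDT entry
		 * into which vsyscall_set_cpu() stashed the cpu and node
		 * numbers; it needs no memory access or privileged state.
		 */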
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
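
/*
 * Illustrative caller (not part of the original file), assuming the
 * fixed layout sketched at the top of the file - vgetcpu is slot 2:
 *
 *	typedef long (*vgetcpu_fn)(unsigned *, unsigned *,
 *				   struct getcpu_cache *);
 *	unsigned cpu, node;
 *	struct getcpu_cache cache = { 0 };	// reused across calls
 *	vgetcpu_fn vgetcpu = (vgetcpu_fn)(VSYSCALL_BASE + 2 * 1024);
 *	vgetcpu(&cpu, &node, &cache);
 */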

/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
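	/* Hand-roll the descriptor: 0x0f40000000000ULL sets the type,
	   S, DPL=3 and present bits; the 20-bit segment limit carries
	   the payload - cpu in limit bits 0..11, node in limit bits
	   12..19 (whose top nibble sits at descriptor bits 48..51,
	   hence the << 48 below). */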
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;
	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
	return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
	extern char __vvar_page;
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	hotcpu_notifier(cpu_vsyscall_notifier, 30);
	return 0;
}

__initcall(vsyscall_init);