/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/errno.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"

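/*
 * Xen pushes two extra words (%rcx and %r11) on top of the hardware
 * exception frame.  Pull them back into the registers and drop the
 * two slots with ret $16; the 8(%rsp) bias skips over the return
 * address of the PARAVIRT_ADJUST_EXCEPTION_FRAME call that got us here.
 */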
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16

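/*
 * Entries in the hypercall page are 32 bytes apart, so hypercall N
 * lives at hypercall_page + N * 32; this gives the address of the
 * iret hypercall stub.
 */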
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
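/*
 * Push a zero "flags" word (no VGCF_* flags) to complete the frame
 * above, then hand it to the iret hypercall.  ENDPATCH/RELOC (see
 * xen-asm.h) record the end of this patchable sequence and the
 * location of the jmp displacement (1b+1) for the paravirt patching
 * machinery.
 */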
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 */
ENTRY(xen_sysexit)
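	/*
	 * Hand-build the iret frame from the sysexit register
	 * convention (%rcx = user stack pointer, %rdx = user return
	 * address), with only IF set in the saved flags.
	 */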
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

ENTRY(xen_sysret64)
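	/*
	 * Entered with the SYSCALL return registers live: %rcx = user
	 * rip, %r11 = user rflags.  Rebuild an iret frame from them and
	 * return via the iret hypercall; VGCF_in_syscall marks this as
	 * a syscall return, so the hypervisor need not restore
	 * %rcx/%r11 from the frame.
	 */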
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back
	 */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(old_rsp)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
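	/*
	 * As xen_sysret64, but returning to 32-bit compat userspace:
	 * use the compat selectors and a zero flags word.
	 */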
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back
	 */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS
	pushq PER_CPU_VAR(old_rsp)
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */

.macro undo_xen_syscall
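	/*
	 * Pull %rcx and %r11 back out of the frame described above and
	 * switch to the saved user %rsp (slot 5), leaving things looking
	 * as if the CPU had just executed syscall/sysenter natively.
	 */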
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
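	/*
	 * Without compat support, 32-bit system calls simply fail:
	 * drop the %rcx/%r11 slots, return -ENOSYS and go straight
	 * back to userspace via the iret hypercall.
	 */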
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */