#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID		0x0000000d

#define XSTATE_FP		0x1
#define XSTATE_SSE		0x2
#define XSTATE_YMM		0x4
#define XSTATE_BNDREGS		0x8
#define XSTATE_BNDCSR		0x10
#define XSTATE_OPMASK		0x20
#define XSTATE_ZMM_Hi256	0x40
#define XSTATE_Hi16_ZMM		0x80

/* XSTATE_Hi16_ZMM above is the highest xstate feature bit (bit 7): */
#define XFEATURES_NR_MAX	8

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512	(XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK	(~(XSTATE_FPSSE | (1ULL << 63)))
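/*
 * Worked example (illustrative only): XSTATE_FPSSE is 0x3, so
 * XSTATE_EXTEND_MASK evaluates to ~(0x3 | (1ULL << 63)), i.e.
 * 0x7ffffffffffffffc: every extended state bit from XSTATE_YMM
 * (bit 2) up to bit 62.
 */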

#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
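/*
 * Resulting layout of the standard-format xsave area, as a sketch
 * (byte offsets follow from the constants above):
 *
 *	[  0..511]	legacy FXSAVE region	(FXSAVE_SIZE)
 *	[512..575]	xsave header		(XSAVE_HDR_OFFSET, XSAVE_HDR_SIZE)
 *	[576..831]	YMM state		(XSAVE_YMM_OFFSET, XSAVE_YMM_SIZE)
 *
 * i.e. XSAVE_YMM_OFFSET = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET = 64 + 512 = 576.
 */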

/* Supported features which allow lazy state saving */
#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		      \
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 xfeatures_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct init_xstate_ctx;

extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
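/*
 * Example encoding (illustrative): the instructions are hand-assembled
 * as raw bytes so the file builds with assemblers that lack the
 * mnemonics. With the REX.W prefix on 64-bit, XSAVE above emits
 * 0x48 0x0f 0xae 0x27, i.e. xsave64 (%rdi): modrm byte 0x27 selects
 * opcode extension /4 with memory operand (%rdi).
 */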

#define xstate_fault	".section .fixup,\"ax\"\n"	\
			"3:  movl $-1,%[err]\n"		\
			"    jmp  2b\n"			\
			".previous\n"			\
			_ASM_EXTABLE(1b, 3b)		\
			: [err] "=r" (err)

/*
 * This function is only called during boot, when the x86 capability
 * bits are not set up yet and alternatives cannot be used.
 */
static inline int xsave_state_booting(struct xsave_struct *fx)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	return err;
}

/*
 * This function is only called during boot, when the x86 capability
 * bits are not set up yet and alternatives cannot be used.
 */
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			     xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	return err;
}
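/*
 * Illustrative boot-time usage of the two helpers above (hypothetical
 * caller; the real one is the xstate setup code): capture the init
 * state once, then restore it wholesale:
 *
 *	xsave_state_booting(&init_xstate_ctx);
 *	...
 *	xrstor_state_booting(&init_xstate_ctx, -1);
 */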

/*
 * Save processor xstate to xsave area.
 */
static inline int xsave_state(struct xsave_struct *fx)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state == SYSTEM_BOOTING);

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because it
	 * supports the compacted format and supervisor states in
	 * addition to xsaveopt's modified optimization.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
	 * because xsaveopt supports the modified optimization, which
	 * xsave does not.
	 *
	 * If neither xsaves nor xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	asm volatile("2:\n\t"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}

/*
 * Restore processor xstate from xsave area.
 */
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	/*
	 * Use xrstors to restore context if it is enabled. xrstors
	 * supports the compacted format of the xsave area, which
	 * xrstor does not.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");

	asm volatile("2:\n"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}
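/*
 * Illustrative pairing of xsave_state()/xrstor_state() (hypothetical
 * caller; 'xs' stands for some task's xsave area): both return
 * non-zero if the instruction faulted:
 *
 *	if (xsave_state(xs))
 *		return -EFAULT;
 *	...
 *	if (xrstor_state(xs, xfeatures_mask))
 *		return -EFAULT;
 */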

/*
 * Restore xstate context for new process during context switch.
 */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	return xrstor_state(fx, -1);
}

/*
 * Save xstate to a user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted xsave area format either, for backward
 * compatibility with old applications which don't understand it.
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XSAVE"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err = 0;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XRSTOR"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}
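/*
 * Illustrative pairing of xsave_user()/xrestore_user() (hypothetical
 * caller; 'buf' stands for an xsave area on the user stack, as in the
 * signal frame paths):
 *
 *	if (xsave_user(buf))
 *		return -EFAULT;
 *	...
 *	if (xrestore_user(buf, xfeatures_mask))
 *		return -EFAULT;
 */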

void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
void setup_xstate_comp(void);
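/*
 * Illustrative use of get_xsave_addr() (hypothetical caller; 'xsave'
 * stands for some task's xsave area): look up one state component;
 * a NULL return means the component is not present in the buffer:
 *
 *	void *bndcsr = get_xsave_addr(xsave, XSTATE_BNDCSR);
 *	if (!bndcsr)
 *		return -EINVAL;
 */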

#endif