/*
 * x86 FPU boot time init code:
 */
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>

/*
 * Initialize the TS bit in CR0 according to the style of context-switches
 * we are using:
 */
static void fpu__init_cpu_ctx_switch(void)
{
	if (!cpu_has_eager_fpu)
		stts();
	else
		clts();
}

/*
 * Initialize the registers found in all CPUs, CR0 and CR4:
 */
static void fpu__init_cpu_generic(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

	if (cpu_has_fxsr)
		cr4_mask |= X86_CR4_OSFXSR;
	if (cpu_has_xmm)
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		cr4_set_bits(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!cpu_has_fpu)
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	/* Flush out any pending x87 state: */
	asm volatile ("fninit");
}

/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 */
void fpu__init_cpu(void)
{
	fpu__init_cpu_generic();
	fpu__init_cpu_xstate();
	fpu__init_cpu_ctx_switch();
}

/*
 * The earliest FPU detection code.
 *
 * Set the X86_FEATURE_FPU CPU-capability bit based on
 * trying to execute an actual sequence of FPU instructions:
 */
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
	unsigned long cr0;
	u16 fsw, fcw;

	fsw = fcw = 0xffff;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	if (fsw == 0 && (fcw & 0x103f) == 0x003f)
		set_cpu_cap(c, X86_FEATURE_FPU);
	else
		clear_cpu_cap(c, X86_FEATURE_FPU);

#ifndef CONFIG_MATH_EMULATION
	if (!cpu_has_fpu) {
		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
}

/*
 * Boot time FPU feature detection code:
 */
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;

static void __init fpu__init_system_mxcsr(void)
{
	unsigned int mask = 0;

	if (cpu_has_fxsr) {
		/* Static because GCC does not get 16-byte stack alignment right: */
		static struct fxregs_state fxregs __initdata;

		asm volatile("fxsave %0" : "+m" (fxregs));

		mask = fxregs.mxcsr_mask;

		/*
		 * If zero then use the default features mask,
		 * which has all features set, except the
		 * denormals-are-zero feature bit (DAZ, MXCSR bit 6,
		 * hence the 0xffbf default):
		 */
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}

/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 */
static void __init fpu__init_system_generic(void)
{
	/*
	 * Set up the legacy init FPU context. (xstate init might overwrite this
	 * with a more modern format, if the CPU supports it.)
	 */
	fpstate_init_fxstate(&init_fpstate.fxsave);

	fpu__init_system_mxcsr();
}

/*
 * Size of the FPU context state. All tasks in the system use the
 * same context size, regardless of what portion they use.
 * This is inherent to the XSAVE architecture which puts all state
 * components into a single, contiguous memory block:
 */
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);

/*
 * Set up the xstate_size based on the legacy FPU context size.
 *
 * We set this up first, and later it will be overwritten by
 * fpu__init_system_xstate() if the CPU knows about xstates.
 */
static void __init fpu__init_system_xstate_size_legacy(void)
{
	static int on_boot_cpu = 1;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	/*
	 * Note that xstate_size might be overwritten later during
	 * fpu__init_system_xstate().
	 */

	if (!cpu_has_fpu) {
		/*
		 * Disable xsave as we do not support it if i387
		 * emulation is enabled.
		 */
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
		xstate_size = sizeof(struct swregs_state);
	} else {
		if (cpu_has_fxsr)
			xstate_size = sizeof(struct fxregs_state);
		else
			xstate_size = sizeof(struct fregs_state);
	}
	/*
	 * Quirk: we don't yet handle the XSAVES* instructions
	 * correctly, as we don't correctly convert between
	 * standard and compacted format when interfacing
	 * with user-space - so disable it for now.
	 *
	 * The difference is small: with recent CPUs the
	 * compacted format is only marginally smaller than
	 * the standard FPU state format.
	 *
	 * ( This is easy to backport while we are fixing
	 *   XSAVES* support. )
	 */
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
}

/*
 * FPU context switching strategies:
 *
 * Against popular belief, we don't do lazy FPU saves, due to the
 * task migration complications it brings on SMP - we only do
 * lazy FPU restores.
 *
 * 'lazy' is the traditional strategy, which is based on setting
 * CR0::TS to 1 during context-switch (instead of doing a full
 * restore of the FPU state), which causes the first FPU instruction
 * after the context switch (whenever it is executed) to fault - at
 * which point we lazily restore the FPU state into FPU registers.
 *
 * Tasks are of course under no obligation to execute FPU instructions,
 * so it can easily happen that another context-switch occurs without
 * a single FPU instruction being executed. If we eventually switch
 * back to the original task (that still owns the FPU) then we have
 * not only avoided the intermediate FPU restores along the way, but
 * the FPU is also still loaded and ready to be used by that task.
 *
 * 'eager' switching is used on modern CPUs: there we switch the FPU
 * state during every context switch, regardless of whether the task
 * has used FPU instructions in that time slice or not. This is done
 * because modern FPU context saving instructions are able to optimize
 * state saving and restoration in hardware: they can detect both
 * unused and untouched FPU state and optimize accordingly.
 *
 * [ Note that even in 'lazy' mode we might optimize context switches
 *   to use 'eager' restores, if we detect that a task is using the FPU
 *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
 */
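/*
 * Rough sketch of the 'lazy' flow described above (simplified; the fault
 * handling and context-switch details live outside this file):
 *
 *   task A uses the FPU, then a context switch to task B sets CR0::TS
 *   task B executes its first FPU instruction -> #NM (device-not-available)
 *   the fault handler restores B's FPU state and clears CR0::TS
 *   B's further FPU use runs at full speed until the next context switch
 */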
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;

static int __init eager_fpu_setup(char *s)
{
	if (!strcmp(s, "on"))
		eagerfpu = ENABLE;
	else if (!strcmp(s, "off"))
		eagerfpu = DISABLE;
	else if (!strcmp(s, "auto"))
		eagerfpu = AUTO;
	return 1;
}
__setup("eagerfpu=", eager_fpu_setup);

/*
 * Pick the FPU context switching strategy:
 */
static void __init fpu__init_system_ctx_switch(void)
{
	static bool on_boot_cpu = 1;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	WARN_ON_FPU(current->thread.fpu.fpstate_active);
	current_thread_info()->status = 0;

	/* Auto enable eagerfpu for xsaveopt */
	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
		eagerfpu = ENABLE;

	if (xfeatures_mask & XSTATE_EAGER) {
		if (eagerfpu == DISABLE) {
			pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
			       xfeatures_mask & XSTATE_EAGER);
			xfeatures_mask &= ~XSTATE_EAGER;
		} else {
			eagerfpu = ENABLE;
		}
	}
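	/*
	 * setup_force_cpu_cap() sets X86_FEATURE_EAGER_FPU, which is what the
	 * cpu_has_eager_fpu test in fpu__init_cpu_ctx_switch() above keys off:
	 */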

	if (eagerfpu == ENABLE)
		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

	pr_info("x86/fpu: Using '%s' FPU context switches.\n",
		eagerfpu == ENABLE ? "eager" : "lazy");
}

/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:
 */
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
	fpu__init_system_early_generic(c);

	/*
	 * The FPU has to be operational for some of the
	 * later FPU init activities:
	 */
	fpu__init_cpu();

	/*
	 * But don't leave CR0::TS set yet, as some of the FPU setup
	 * methods depend on being able to execute FPU instructions
	 * that will fault on a set TS, such as the FXSAVE in
	 * fpu__init_system_mxcsr().
	 */
	clts();

	fpu__init_system_generic();
	fpu__init_system_xstate_size_legacy();
	fpu__init_system_xstate();

	fpu__init_system_ctx_switch();
}

/*
 * Boot parameter to turn off FPU support and fall back to math-emu:
 */
static int __init no_387(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FPU);
	return 1;
}
__setup("no387", no_387);

/*
 * Disable all xstate CPU features:
 */
static int __init x86_noxsave_setup(char *s)
{
	if (strlen(s))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
	setup_clear_cpu_cap(X86_FEATURE_AVX);
	setup_clear_cpu_cap(X86_FEATURE_AVX2);

	return 1;
}
__setup("noxsave", x86_noxsave_setup);

/*
 * Disable the XSAVEOPT instruction specifically:
 */
static int __init x86_noxsaveopt_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

	return 1;
}
__setup("noxsaveopt", x86_noxsaveopt_setup);

/*
 * Disable the XSAVES instruction:
 */
static int __init x86_noxsaves_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);

	return 1;
}
__setup("noxsaves", x86_noxsaves_setup);

/*
 * Disable FX save/restore and SSE support:
 */
static int __init x86_nofxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
	setup_clear_cpu_cap(X86_FEATURE_XMM);

	return 1;
}
__setup("nofxsr", x86_nofxsr_setup);