#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
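	/*
	 * 0x9c5a203a is the access key K8 expects in EDI before it will
	 * let these MSRs be read (per the BKDG).
	 */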
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *		(Publication # 21266  Issue Date: August 1998)
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern __visible void vide(void);
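/*
 * vide() is a minimal, aligned indirect-call target. init_amd_k6() below
 * times a loop of calls to it: pre-fix parts are noticeably slower, which
 * is how the buggy steppings are told apart from fixed ones.
 */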
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
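			/*
			 * Old-style WHCR: bit 0 enables write allocation,
			 * bits 7:1 hold the limit in 4 MB units (508 MB
			 * corresponds to the maximum value of 127).
			 */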
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
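			/*
			 * New-style WHCR: bits 31:22 hold the limit in 4 MB
			 * units (4092 MB -> 1023), bit 16 enables it.
			 */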
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Only perform the MP check when called from identify_secondary_cpu(). */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
 * Athlon 662, Duron 671, and Athlons above model 7 have the MP capability
 * bit. It's worth noting that the A5 stepping (662) of some
 * Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
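 * Example: 2 nodes per socket and 16 cores give cus_per_node == 8, so a
 * cpu_core_id of 11 becomes 3.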
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}


static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
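	/*
	 * CPUID Fn8000_001E EBX[15:8] is the number of threads sharing a
	 * compute unit (fam 15h) or a core (fam 17h), minus 1.
	 */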
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id  = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu, node_id);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
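 * Example: with 3 core id bits, initial APIC ID 0x1a yields cpu_core_id
 * 2 (0x1a & 7) and phys_proc_id 3 (0x1a >> 3).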
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

u16 amd_get_nb_id(int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;
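		/*
		 * CPUID 0x80000005 EDX[31:24] is the L1I size in KB and
		 * EDX[23:16] its associativity, so upperbit works out to
		 * the I-cache way size in bytes.
		 */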

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *            SEV feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_K8_SYSCFG, msr);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		clear_cpu_cap(c, X86_FEATURE_SME);
clear_sev:
		clear_cpu_cap(c, X86_FEATURE_SEV);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

#ifdef CONFIG_X86_32
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_K7);
#endif

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is the accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
	if (!cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is IDed by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x17: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature.
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		unsigned long long val;
		int ret;

		/*
		 * A serializing LFENCE has less overhead than MFENCE, so
		 * use it for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/*
		 * Verify that the MSR write was successful (could be running
		 * under a hypervisor) and only then assume that LFENCE is
		 * serializing.
		 */
		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
			/* A serializing LFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
		} else {
			/* MFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
		}
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
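	/*
	 * EBX reports the L2 TLB for 4K pages: bits 27:16 hold the dTLB
	 * entry count and bits 11:0 the iTLB entry count (the top nibble
	 * of each half is the associativity, hence the 0xfff mask).
	 */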

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as its first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
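
/*
 * Example: AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf) packs to 0x10021fff:
 * family 0x10, (model << 4 | stepping) from 0x021 up to 0xfff, which is
 * exactly what cpu_has_amd_erratum() compares "ms" against below.
 */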

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));


static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
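		/* The DR1..DR3 address mask MSRs are contiguous; index off DR1's. */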
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}