#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = native_read_msr((msr));		\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	native_write_msr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	native_write_msr((msr),				\
			 (u32)((u64)(val)),		\
			 (u32)((u64)(val) >> 32))
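
/*
 * Usage sketch (illustrative, assuming MSR_IA32_UCODE_REV from
 * <asm/msr-index.h>): the helpers above split/merge the 64-bit MSR value
 * into the low/high 32-bit halves used by the rdmsr/wrmsr instructions:
 *
 *	u32 dummy, rev;
 *
 *	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
 */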

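/*
 * Per-CPU microcode identity (descriptive sketch): 'sig' is the
 * CPUID(1).EAX processor signature, 'pf' the platform/processor flags
 * (used by Intel to match a patch to a platform) and 'rev' the currently
 * loaded microcode revision.
 */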
struct cpu_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int rev;
};

struct device;

enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
extern bool dis_ucode_ldr;

struct microcode_ops {
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that
	 * the callbacks below run on a target cpu when they
	 * are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	int (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};
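
/*
 * Rough usage sketch (the authoritative flow lives in the generic
 * microcode_core code): a vendor init function hands back an ops pointer
 * which the core then drives per CPU, e.g.:
 *
 *	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 *
 *	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
 *	microcode_ops->request_microcode_fw(cpu, dev, true);
 *	microcode_ops->apply_microcode(cpu);
 */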

struct ucode_cpu_info {
	struct cpu_signature	cpu_sig;
	int			valid;
	void			*mc;
};
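/* Per-CPU loader state, indexed by CPU number; one entry per possible CPU. */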
extern struct ucode_cpu_info ucode_cpu_info[];

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif

#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
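
/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order
 * ("Genu"/"ineI"/"ntel", "Auth"/"enti"/"cAMD"), hence CPUID_IS() compares
 * ebx against the first chunk, edx against the second and ecx against
 * the third.
 */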

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is
 * not set up yet, so x86_vendor() is used to get the vendor ID for the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would need a linear
 * address. To keep the code simple, x86_vendor() is used for the APs as
 * well.
 *
 * x86_vendor() reads the vendor information directly from CPUID.
 */
static inline int x86_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int __x86_family(unsigned int sig)
{
	unsigned int x86;

	x86 = (sig >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (sig >> 20) & 0xff;

	return x86;
}
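
/*
 * Worked example (sketch): for an AMD Fam10h-style signature such as
 * 0x00100f22 the base family field is 0xf, so the extended family
 * ((sig >> 20) & 0xff) == 0x01 is added and __x86_family() returns 0x10.
 */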

static inline unsigned int x86_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return __x86_family(eax);
}

static inline unsigned int x86_model(unsigned int sig)
{
	unsigned int x86, model;

	x86 = __x86_family(sig);

	model = (sig >> 4) & 0xf;

	if (x86 == 0x6 || x86 == 0xf)
		model += ((sig >> 16) & 0xf) << 4;

	return model;
}
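
/*
 * Worked example (sketch): for an Intel signature such as 0x000306a9 the
 * family is 0x6 and the base model field is 0xa; the extended model
 * ((sig >> 16) & 0xf) == 0x3 is shifted in, so x86_model() returns 0x3a.
 */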

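/*
 * Early-loading entry points (rough description; see the early loader
 * code for the authoritative flow): load_ucode_bsp() runs during early
 * boot on the boot CPU, load_ucode_ap() on each AP as it is brought up,
 * save_microcode_in_initrd() keeps the relevant patches around after the
 * initrd is freed, and reload_early_microcode() reapplies them later,
 * e.g. on resume.
 */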
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
void reload_early_microcode(void);
#else
static inline void __init load_ucode_bsp(void) {}
static inline void load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
	return 0;
}
static inline void reload_early_microcode(void) {}
#endif

#endif /* _ASM_X86_MICROCODE_H */