Commit 6ac3bb16 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "There's a number of fixes:

   - a round of fixes for CPUID-less legacy CPUs
   - a number of microcode loader fixes
   - i8042 detection robustization fixes
   - stack dump/unwinder fixes
   - x86 SoC platform driver fixes
   - a GCC 7 warning fix
   - virtualization related fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  Revert "x86/unwind: Detect bad stack return address"
  x86/paravirt: Mark unused patch_default label
  x86/microcode/AMD: Reload proper initrd start address
  x86/platform/intel/quark: Add printf attribute to imr_self_test_result()
  x86/platform/intel-mid: Switch MPU3050 driver to IIO
  x86/alternatives: Do not use sync_core() to serialize I$
  x86/topology: Document cpu_llc_id
  x86/hyperv: Handle unknown NMIs on one CPU when unknown_nmi_panic
  x86/asm: Rewrite sync_core() to use IRET-to-self
  x86/microcode/intel: Replace sync_core() with native_cpuid()
  Revert "x86/boot: Fail the boot if !M486 and CPUID is missing"
  x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
  x86/cpu: Probe CPUID leaf 6 even when cpuid_level == 6
  x86/tools: Fix gcc-7 warning in relocs.c
  x86/unwind: Dump stack data on warnings
  x86/unwind: Adjust last frame check for aligned function stacks
  x86/init: Fix a couple of comment typos
  x86/init: Remove i8042_detect() from platform ops
  Input: i8042 - Trust firmware a bit more when probing on X86
  x86/init: Add i8042 state to the platform data
  ...
@@ -63,6 +63,15 @@ The topology of a system is described in the units of:
     The maximum possible number of packages in the system. Helpful for per
     package facilities to preallocate per package information.
 
+  - cpu_llc_id:
+
+    A per-CPU variable containing:
+    - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
+      Cache
+
+    - On AMD, the Node ID or Core Complex ID containing the Last Level
+      Cache. In general, it is a number identifying an LLC uniquely on the
+      system.
 
 * Cores:
...
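Since the value is uniform across every CPU behind a given cache, comparing the per-CPU values is the LLC-sharing test. A minimal sketch of a consumer, modeled on the match_llc() check in arch/x86/kernel/smpboot.c (illustration only, not part of this commit; cpus_share_llc is a made-up name):

	/* Two CPUs share a last-level cache iff their cpu_llc_id values match. */
	static bool cpus_share_llc(int cpu1, int cpu2)
	{
		return per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
		       per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2);
	}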
@@ -87,12 +87,6 @@ int validate_cpu(void)
 		return -1;
 	}
 
-	if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
-	    !has_eflag(X86_EFLAGS_ID)) {
-		printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
-		return -1;
-	}
-
 	if (err_flags) {
 		puts("This kernel requires the following features "
 		     "not present on the CPU:\n");
...
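The deleted branch keyed off has_eflag(X86_EFLAGS_ID): CPUID exists precisely when bit 21 (ID) of EFLAGS can be toggled, which is how CPUID-less 486-class parts are told apart from later CPUs. A sketch of that probe, closely following the boot code's has_eflag() (32-bit, illustration only):

	static int cpu_has_cpuid(void)
	{
		unsigned long f0, f1;

		asm volatile("pushf\n\t"	/* save EFLAGS */
			     "pushf\n\t"
			     "pop %0\n\t"	/* f0 = EFLAGS */
			     "mov %0,%1\n\t"
			     "xor %2,%1\n\t"	/* flip the ID bit */
			     "push %1\n\t"
			     "popf\n\t"		/* try to write it back */
			     "pushf\n\t"
			     "pop %1\n\t"	/* f1 = EFLAGS after write */
			     "popf"		/* restore original EFLAGS */
			     : "=&r" (f0), "=&r" (f1)
			     : "ri" ((unsigned long)X86_EFLAGS_ID));

		return !!((f0 ^ f1) & X86_EFLAGS_ID);	/* did the flip stick? */
	}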
@@ -602,33 +602,69 @@ static __always_inline void cpu_relax(void)
 	rep_nop();
 }
 
-/* Stop speculative execution and prefetching of modified code. */
+/*
+ * This function forces the icache and prefetched instruction stream to
+ * catch up with reality in two very specific cases:
+ *
+ *  a) Text was modified using one virtual address and is about to be executed
+ *     from the same physical page at a different virtual address.
+ *
+ *  b) Text was modified on a different CPU, may subsequently be
+ *     executed on this CPU, and you want to make sure the new version
+ *     gets executed.  This generally means you're calling this in an IPI.
+ *
+ * If you're calling this for a different reason, you're probably doing
+ * it wrong.
+ */
 static inline void sync_core(void)
 {
-	int tmp;
-
-#ifdef CONFIG_M486
 	/*
-	 * Do a CPUID if available, otherwise do a jump.  The jump
-	 * can conveniently enough be the jump around CPUID.
+	 * There are quite a few ways to do this.  IRET-to-self is nice
+	 * because it works on every CPU, at any CPL (so it's compatible
+	 * with paravirtualization), and it never exits to a hypervisor.
+	 * The only down sides are that it's a bit slow (it seems to be
+	 * a bit more than 2x slower than the fastest options) and that
+	 * it unmasks NMIs.  The "push %cs" is needed because, in
+	 * paravirtual environments, __KERNEL_CS may not be a valid CS
+	 * value when we do IRET directly.
+	 *
+	 * In case NMI unmasking or performance ever becomes a problem,
+	 * the next best option appears to be MOV-to-CR2 and an
+	 * unconditional jump.  That sequence also works on all CPUs,
+	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
+	 *
+	 * CPUID is the conventional way, but it's nasty: it doesn't
+	 * exist on some 486-like CPUs, and it usually exits to a
+	 * hypervisor.
+	 *
+	 * Like all of Linux's memory ordering operations, this is a
+	 * compiler barrier as well.
 	 */
-	asm volatile("cmpl %2,%1\n\t"
-		     "jl 1f\n\t"
-		     "cpuid\n"
-		     "1:"
-		     : "=a" (tmp)
-		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_32
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: "+r" (__sp) : : "memory");
 #else
-	/*
-	 * CPUID is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified.
-	 */
-	asm volatile("cpuid"
-		     : "=a" (tmp)
-		     : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
 #endif
 }
...
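For case b) in the comment above, the modifying CPU typically broadcasts sync_core() in an IPI before anyone may execute the patched text. A hedged sketch of that usage pattern (the kernel's text-poking code uses the same shape; do_sync_core and text_poke_sync_all are illustrative names):

	static void do_sync_core(void *info)
	{
		sync_core();	/* serialize this CPU's instruction stream */
	}

	static void text_poke_sync_all(void)
	{
		/* run do_sync_core() on every online CPU and wait for it */
		on_each_cpu(do_sync_core, NULL, 1);
	}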
@@ -12,7 +12,7 @@ struct unwind_state {
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
-	unsigned long *bp;
+	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
 #else
 	unsigned long *sp;
...
@@ -59,7 +59,7 @@ struct x86_init_irqs {
 /**
  * struct x86_init_oem - oem platform specific customizing functions
- * @arch_setup:			platform specific architecure setup
+ * @arch_setup:			platform specific architecture setup
  * @banner:			print a platform specific banner
  */
 struct x86_init_oem {
@@ -164,9 +164,26 @@ struct x86_legacy_devices {
 	int pnpbios;
 };
 
+/**
+ * enum x86_legacy_i8042_state - i8042 keyboard controller state
+ * @X86_LEGACY_I8042_PLATFORM_ABSENT:	the controller is always absent on
+ *					the given platform/subarch.
+ * @X86_LEGACY_I8042_FIRMWARE_ABSENT:	firmware reports that the controller
+ *					is absent.
+ * @X86_LEGACY_I8042_EXPECTED_PRESENT:	the controller is likely to be
+ *					present, the i8042 driver should
+ *					probe for controller existence.
+ */
+enum x86_legacy_i8042_state {
+	X86_LEGACY_I8042_PLATFORM_ABSENT,
+	X86_LEGACY_I8042_FIRMWARE_ABSENT,
+	X86_LEGACY_I8042_EXPECTED_PRESENT,
+};
+
 /**
  * struct x86_legacy_features - legacy x86 features
  *
+ * @i8042: indicates whether we expect the platform to have an i8042
+ *	controller present.
  * @rtc: this device has a CMOS real-time clock present
  * @reserve_bios_regions: boot code will search for the EBDA address and the
  *	start of the 640k - 1M BIOS region. If false, the platform must
@@ -175,6 +192,7 @@ struct x86_legacy_devices {
  *	documentation for further details.
  */
 struct x86_legacy_features {
+	enum x86_legacy_i8042_state i8042;
 	int rtc;
 	int reserve_bios_regions;
 	struct x86_legacy_devices devices;
@@ -188,15 +206,14 @@ struct x86_legacy_features {
  * @set_wallclock:		set time back to HW clock
  * @is_untracked_pat_range	exclude from PAT logic
  * @nmi_init			enable NMI on cpus
- * @i8042_detect		pre-detect if i8042 controller exists
  * @save_sched_clock_state:	save state for sched_clock() on suspend
  * @restore_sched_clock_state:	restore state for sched_clock() on resume
- * @apic_post_init:		adjust apic if neeeded
+ * @apic_post_init:		adjust apic if needed
  * @legacy:			legacy features
  * @set_legacy_features:	override legacy features. Use of this callback
  *				is highly discouraged. You should only need
  *				this if your hardware platform requires further
- *				custom fine tuning far beyong what may be
+ *				custom fine tuning far beyond what may be
  *				possible in x86_early_init_platform_quirks() by
  *				only using the current x86_hardware_subarch
  *				semantics.
@@ -210,7 +227,6 @@ struct x86_platform_ops {
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
 	void (*nmi_init)(void);
 	unsigned char (*get_nmi_reason)(void);
-	int (*i8042_detect)(void);
 	void (*save_sched_clock_state)(void);
 	void (*restore_sched_clock_state)(void);
 	void (*apic_post_init)(void);
...
@@ -930,6 +930,13 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 		x86_platform.legacy.devices.pnpbios = 0;
 	}
 
+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+	    !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
+	    x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
+		pr_debug("ACPI: i8042 controller is absent\n");
+		x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
+	}
+
 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
 		pr_debug("ACPI: not registering RTC platform device\n");
 		x86_platform.legacy.rtc = 0;
...
@@ -337,7 +337,11 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 }
 
-static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
+/*
+ * "noinline" to cause control flow change and thus invalidate I$ and
+ * cause refetch after modification.
+ */
+static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
 	unsigned long flags;
@@ -346,7 +350,6 @@ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
 	local_irq_save(flags);
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
-	sync_core();
 	local_irq_restore(flags);
 
 	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
@@ -359,9 +362,12 @@ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
  * This implies that asymmetric systems where APs have less capabilities than
  * the boot processor are not handled. Tough. Make sure you disable such
  * features by hand.
+ *
+ * Marked "noinline" to cause control flow change and thus insn cache
+ * to refetch changed I$ lines.
  */
-void __init_or_module apply_alternatives(struct alt_instr *start,
-					 struct alt_instr *end)
+void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+						  struct alt_instr *end)
 {
 	struct alt_instr *a;
 	u8 *instr, *replacement;
@@ -667,7 +673,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
 	unsigned long flags;
 
 	local_irq_save(flags);
 	memcpy(addr, opcode, len);
-	sync_core();
 	local_irq_restore(flags);
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
...
@@ -667,13 +667,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[CPUID_1_EDX] = edx;
 	}
 
+	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
+	if (c->cpuid_level >= 0x00000006)
+		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
+
 	/* Additional Intel-defined flags: level 0x00000007 */
 	if (c->cpuid_level >= 0x00000007) {
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 		c->x86_capability[CPUID_7_0_EBX] = ebx;
-		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 		c->x86_capability[CPUID_7_ECX] = ecx;
 	}
...
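The point of the move: the leaf 0x6 read used to sit inside the leaf 0x7 block, so a CPU whose maximum basic leaf is exactly 6 never had CPUID_6_EAX populated and lost features such as ARAT, the always-running APIC timer bit in CPUID.06H:EAX[2]. Reduced illustration of the two guards (not kernel code):

	if (c->cpuid_level >= 0x00000007) {
		/* old: skipped entirely when cpuid_level == 6 */
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
	}

	if (c->cpuid_level >= 0x00000006)
		/* new: guarded by the leaf it actually reads */
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);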
@@ -116,10 +116,11 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
 /*
  * This scans the ucode blob for the proper container as we can have multiple
- * containers glued together.
+ * containers glued together. Returns the equivalence ID from the equivalence
+ * table or 0 if none found.
  */
-static struct container
-find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
+static u16
+find_proper_container(u8 *ucode, size_t size, struct container *ret_cont)
 {
 	struct container ret = { NULL, 0 };
 	u32 eax, ebx, ecx, edx;
@@ -138,7 +139,7 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
 	if (header[0] != UCODE_MAGIC ||
 	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
 	    header[2] == 0)                            /* size */
-		return ret;
+		return eq_id;
 
 	eax = 0x00000001;
 	ecx = 0;
@@ -163,8 +164,9 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
 			 * ucode update loop below
 			 */
 			left = ret.size - offset;
-			*ret_id = eq_id;
-			return ret;
+			*ret_cont = ret;
+			return eq_id;
 		}
 
 /*
@@ -189,7 +191,7 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
 		ucode = data;
 	}
 
-	return ret;
+	return eq_id;
 }
 
 static int __apply_microcode_amd(struct microcode_amd *mc_amd)
@@ -214,17 +216,18 @@ static int __apply_microcode_amd(struct microcode_amd *mc_amd)
  * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
+ *
+ * Returns true if container found (sets @ret_cont), false otherwise.
  */
-static struct container
-apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
+static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
+				      struct container *ret_cont)
 {
-	struct container ret = { NULL, 0 };
 	u8 (*patch)[PATCH_MAX_SIZE];
+	u32 rev, *header, *new_rev;
+	struct container ret;
 	int offset, left;
-	u32 rev, *header;
-	u8 *data;
 	u16 eq_id = 0;
-	u32 *new_rev;
+	u8 *data;
 
 #ifdef CONFIG_X86_32
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
@@ -235,11 +238,11 @@ apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
 #endif
 
 	if (check_current_patch_level(&rev, true))
-		return (struct container){ NULL, 0 };
+		return false;
 
-	ret = find_proper_container(ucode, size, &eq_id);
+	eq_id = find_proper_container(ucode, size, &ret);
 	if (!eq_id)
-		return (struct container){ NULL, 0 };
+		return false;
 
 	this_equiv_id = eq_id;
 	header = (u32 *)ret.data;
@@ -273,7 +276,11 @@ apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
 		data += offset;
 		left -= offset;
 	}
-	return ret;
+
+	if (ret_cont)
+		*ret_cont = ret;
+
+	return true;
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ -294,6 +301,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 void __init load_ucode_amd_bsp(unsigned int family)
 {
 	struct ucode_cpu_info *uci;
+	u32 eax, ebx, ecx, edx;
 	struct cpio_data cp;
 	const char *path;
 	bool use_pa;
@@ -315,9 +323,12 @@ void __init load_ucode_amd_bsp(unsigned int family)
 		return;
 
 	/* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
-	uci->cpu_sig.sig = cpuid_eax(1);
+	eax = 1;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	uci->cpu_sig.sig = eax;
 
-	apply_microcode_early_amd(cp.data, cp.size, true);
+	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
 }
 
 #ifdef CONFIG_X86_32
@@ -349,7 +360,7 @@ void load_ucode_amd_ap(unsigned int family)
 	 * This would set amd_ucode_patch above so that the following APs can
 	 * use it directly instead of going down this path again.
 	 */
-	apply_microcode_early_amd(cp.data, cp.size, true);
+	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
 }
 #else
 void load_ucode_amd_ap(unsigned int family)
@@ -387,8 +398,7 @@ void load_ucode_amd_ap(unsigned int family)
 		}
 	}
 
-	cont = apply_microcode_early_amd(cp.data, cp.size, false);
-	if (!(cont.data && cont.size)) {
+	if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) {
 		cont.size = -1;
 		return;
 	}
@@ -443,7 +453,7 @@ int __init save_microcode_in_initrd_amd(unsigned int fam)
 		return -EINVAL;
 	}
 
-	cont = find_proper_container(cp.data, cp.size, &eq_id);
+	eq_id = find_proper_container(cp.data, cp.size, &cont);
 	if (!eq_id) {
 		cont.size = -1;
 		return -EINVAL;
...
@@ -44,7 +44,7 @@
 #define DRIVER_VERSION	"2.2"
 
 static struct microcode_ops	*microcode_ops;
-static bool dis_ucode_ldr;
+static bool dis_ucode_ldr = true;
 
 LIST_HEAD(microcode_cache);
@@ -76,6 +76,7 @@ struct cpu_info_ctx {
 static bool __init check_loader_disabled_bsp(void)
 {
 	static const char *__dis_opt_str = "dis_ucode_ldr";
+	u32 a, b, c, d;
 
 #ifdef CONFIG_X86_32
 	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
@@ -88,8 +89,23 @@ static bool __init check_loader_disabled_bsp(void)
 	bool *res = &dis_ucode_ldr;
 #endif
 
-	if (cmdline_find_option_bool(cmdline, option))
-		*res = true;
+	if (!have_cpuid_p())
+		return *res;
+
+	a = 1;
+	c = 0;
+	native_cpuid(&a, &b, &c, &d);
+
+	/*
+	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
+	 * completely accurate as xen pv guests don't see that CPUID bit set but
+	 * that's good enough as they don't land on the BSP path anyway.
+	 */
+	if (c & BIT(31))
+		return *res;
+
+	if (cmdline_find_option_bool(cmdline, option) <= 0)
+		*res = false;
 
 	return *res;
 }
@@ -121,9 +137,6 @@ void __init load_ucode_bsp(void)
 	if (check_loader_disabled_bsp())
 		return;
 
-	if (!have_cpuid_p())
-		return;
-
 	vendor = x86_cpuid_vendor();
 	family = x86_cpuid_family();
@@ -157,9 +170,6 @@ void load_ucode_ap(void)
 	if (check_loader_disabled_ap())
 		return;
 
-	if (!have_cpuid_p())
-		return;
-
 	vendor = x86_cpuid_vendor();
 	family = x86_cpuid_family();
@@ -233,14 +243,12 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 # endif
 
 	/*
-	 * Did we relocate the ramdisk?
-	 *
-	 * So we possibly relocate the ramdisk *after* applying microcode on the
-	 * BSP so we rely on use_pa (use physical addresses) - even if it is not
-	 * absolutely correct - to determine whether we've done the ramdisk
-	 * relocation already.
+	 * Fixup the start address: after reserve_initrd() runs, initrd_start
+	 * has the virtual address of the beginning of the initrd. It also
+	 * possibly relocates the ramdisk. In either case, initrd_start contains
+	 * the updated address so use that instead.
 	 */
-	if (!use_pa && relocated_ramdisk)
+	if (!use_pa && initrd_start)
 		start = initrd_start;
 
 	return find_cpio_data(path, (void *)start, size, NULL);
...
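The new check relies on CPUID(1).ECX[31], which the vendors reserve and which hypervisors set for their guests. A reduced sketch of just that test (illustration only; running_as_guest is a made-up helper):

	static bool running_as_guest(void)
	{
		unsigned int eax = 1, ebx, ecx = 0, edx;

		native_cpuid(&eax, &ebx, &ecx, &edx);

		return !!(ecx & BIT(31));	/* hypervisor-present bit */
	}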
@@ -368,6 +368,26 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
 	return patch;
 }
 
+static void cpuid_1(void)
+{
+	/*
+	 * According to the Intel SDM, Volume 3, 9.11.7:
+	 *
+	 *   CPUID returns a value in a model specific register in
+	 *   addition to its usual register return values. The
+	 *   semantics of CPUID cause it to deposit an update ID value
+	 *   in the 64-bit model-specific register at address 08BH
+	 *   (IA32_BIOS_SIGN_ID). If no update is present in the
+	 *   processor, the value in the MSR remains unmodified.
+	 *
+	 * Use native_cpuid -- this code runs very early and we don't
+	 * want to mess with paravirt.
+	 */
+	unsigned int eax = 1, ebx, ecx = 0, edx;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+}
+
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
@@ -393,7 +413,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();
 
 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
@@ -593,7 +613,7 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();
 
 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
@@ -805,7 +825,7 @@ static int apply_microcode_intel(int cpu)
 	wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();
 
 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
...
@@ -30,6 +30,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -157,6 +158,26 @@ static unsigned char hv_get_nmi_reason(void)
 	return 0;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016, Debug-VM sends NMIs to all CPUs which makes
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -182,6 +203,9 @@ static void __init ms_hyperv_init_platform(void)
 		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
 			lapic_timer_frequency);
 	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
...
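The interesting piece above is the election: atomic_cmpxchg() returns the old value, so exactly one CPU sees -1 and claims the unknown NMI. That winner falls through to NMI_DONE so the unknown_nmi_panic path still runs once, while every other CPU reports NMI_HANDLED and swallows its copy. The pattern in isolation (illustrative sketch, not from this commit):

	static atomic_t claimed_cpu = ATOMIC_INIT(-1);

	/* Returns true on exactly one CPU, no matter how many race in. */
	static bool claim_one_shot_event(void)
	{
		return atomic_cmpxchg(&claimed_cpu, -1,
				      raw_smp_processor_id()) == -1;
	}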
@@ -68,7 +68,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 #endif
 
 	default:
-patch_default:
+patch_default: __maybe_unused
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
...
@@ -80,7 +80,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 #endif
 
 	default:
-patch_default:
+patch_default: __maybe_unused
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
...
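Both hunks silence the same GCC 7 -Wunused-label warning: patch_default: is reached only by a goto inside an #ifdef'ed case, so configurations that compile that case out leave the label defined but never referenced. A reduced sketch of the shape (CONFIG_FOO is hypothetical; __maybe_unused is the kernel's wrapper for the unused attribute):

	unsigned f(unsigned type)
	{
		unsigned ret;

		switch (type) {
	#ifdef CONFIG_FOO
		case 1:
			goto patch_default;	/* the only use of the label */
	#endif
		default:
	patch_default: __maybe_unused		/* silences -Wunused-label */
			ret = 0;
			break;
		}
		return ret;
	}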
@@ -6,6 +6,7 @@
 void __init x86_early_init_platform_quirks(void)
 {
+	x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT;
 	x86_platform.legacy.rtc = 1;
 	x86_platform.legacy.reserve_bios_regions = 0;
 	x86_platform.legacy.devices.pnpbios = 1;
@@ -16,10 +17,14 @@ void __init x86_early_init_platform_quirks(void)
 		break;
 	case X86_SUBARCH_XEN:
 	case X86_SUBARCH_LGUEST:
+		x86_platform.legacy.devices.pnpbios = 0;
+		x86_platform.legacy.rtc = 0;
+		break;
 	case X86_SUBARCH_INTEL_MID:
 	case X86_SUBARCH_CE4100:
 		x86_platform.legacy.devices.pnpbios = 0;
 		x86_platform.legacy.rtc = 0;
+		x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
 		break;
 	}
...
@@ -6,6 +6,37 @@
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)
 
+static void unwind_dump(struct unwind_state *state, unsigned long *sp)
+{
+	static bool dumped_before = false;
+	bool prev_zero, zero = false;
+	unsigned long word;
+
+	if (dumped_before)
+		return;
+
+	dumped_before = true;
+
+	printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n",
+			state->stack_info.type, state->stack_info.next_sp,
+			state->stack_mask, state->graph_idx);
+
+	for (sp = state->orig_sp; sp < state->stack_info.end; sp++) {
+		word = READ_ONCE_NOCHECK(*sp);
+
+		prev_zero = zero;
+		zero = word == 0;
+
+		if (zero) {
+			if (!prev_zero)
+				printk_deferred("%p: %016x ...\n", sp, 0);
+			continue;
+		}
+
+		printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word);
+	}
+}
+
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
 	unsigned long addr;
@@ -20,15 +51,7 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
 	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
 				     addr_p);
 
-	if (!__kernel_text_address(addr)) {
-		printk_deferred_once(KERN_WARNING
-			"WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n",
-			(void *)addr, addr_p, state->task->comm,
-			state->task->pid);
-		return 0;
-	}
-
-	return addr;
+	return __kernel_text_address(addr) ? addr : 0;
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
@@ -46,7 +69,14 @@ static bool is_last_task_frame(struct unwind_state *state)
 	unsigned long bp = (unsigned long)state->bp;
 	unsigned long regs = (unsigned long)task_pt_regs(state->task);
 
-	return bp == regs - FRAME_HEADER_SIZE;
+	/*
+	 * We have to check for the last task frame at two different locations
+	 * because gcc can occasionally decide to realign the stack pointer and
+	 * change the offset of the stack frame by a word in the prologue of a
+	 * function called by head/entry code.
+	 */
+	return bp == regs - FRAME_HEADER_SIZE ||
+	       bp == regs - FRAME_HEADER_SIZE - sizeof(long);
 }
@@ -67,6 +97,7 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
 			       size_t len)
 {
 	struct stack_info *info = &state->stack_info;
+	enum stack_type orig_type = info->type;
 
 	/*
 	 * If addr isn't on the current stack, switch to the next one.
@@ -80,6 +111,9 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
 			   &state->stack_mask))
 		return false;
 
+	if (!state->orig_sp || info->type != orig_type)
+		state->orig_sp = addr;
+
 	return true;
 }
@@ -178,11 +212,13 @@ bool unwind_next_frame(struct unwind_state *state)
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
 			state->regs, state->task->comm,
 			state->task->pid, next_frame);
+		unwind_dump(state, (unsigned long *)state->regs);
 	} else {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
 			state->bp, state->task->comm,
 			state->task->pid, next_frame);
+		unwind_dump(state, state->bp);
 	}
 
 the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
...
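Background for the checks above (sketch, not kernel code): with CONFIG_FRAME_POINTER, each frame begins with the two words of FRAME_HEADER_SIZE, the saved frame pointer followed by the return address pushed by the call. That is why the last frame normally sits at regs - FRAME_HEADER_SIZE below the task's pt_regs, and why a walk follows bp[0]:

	struct frame {
		struct frame *next_bp;	/* saved caller %bp, at bp[0] */
		unsigned long ret_addr;	/* return address, at bp[1] */
	};

	static void walk_frames(unsigned long *bp)
	{
		struct frame *f = (struct frame *)bp;

		/* a real unwinder also validates f against the stack bounds */
		while (f && f->ret_addr) {
			printk("ret=%pB\n", (void *)f->ret_addr);
			f = f->next_bp;
		}
	}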
@@ -89,7 +89,6 @@ struct x86_cpuinit_ops x86_cpuinit = {
 };
 
 static void default_nmi_init(void) { };
-static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform __ro_after_init = {
 	.calibrate_cpu			= native_calibrate_cpu,
@@ -100,7 +99,6 @@ struct x86_platform_ops x86_platform __ro_after_init = {
 	.is_untracked_pat_range		= is_ISA_range,
 	.nmi_init			= default_nmi_init,
 	.get_nmi_reason			= default_get_nmi_reason,
-	.i8042_detect			= default_i8042_detect,
 	.save_sched_clock_state		= tsc_save_sched_clock_state,
 	.restore_sched_clock_state	= tsc_restore_sched_clock_state,
 };
...
@@ -23,11 +23,6 @@
 #include <asm/io_apic.h>
 #include <asm/emergency-restart.h>
 
-static int ce4100_i8042_detect(void)
-{
-	return 0;
-}
-
 /*
  * The CE4100 platform has an internal 8051 Microcontroller which is
  * responsible for signaling to the external Power Management Unit the
@@ -145,7 +140,6 @@ static void sdv_pci_init(void)
 void __init x86_ce4100_early_setup(void)
 {
 	x86_init.oem.arch_setup = sdv_arch_setup;
-	x86_platform.i8042_detect = ce4100_i8042_detect;
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = x86_init_noop;
...
@@ -19,7 +19,7 @@ obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
-obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
+obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o
obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
 # I2C GPIO Expanders
...
@@ -161,12 +161,6 @@ static void intel_mid_arch_setup(void)
 	regulator_has_full_constraints();
 }
 
-/* MID systems don't have i8042 controller */
-static int intel_mid_i8042_detect(void)
-{
-	return 0;
-}
-
 /*
  * Moorestown does not have external NMI source nor port 0x61 to report
  * NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -197,7 +191,6 @@ void __init x86_intel_mid_early_setup(void)
 	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
 
 	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
-	x86_platform.i8042_detect = intel_mid_i8042_detect;
 	x86_init.timers.wallclock_init = intel_mid_rtc_init;
 	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
...
@@ -25,7 +25,8 @@
  * @fmt:	format string.
  * ...		variadic argument list.
  */
-static void __init imr_self_test_result(int res, const char *fmt, ...)
+static __printf(2, 3)
+void __init imr_self_test_result(int res, const char *fmt, ...)
 {
 	va_list vlist;
...
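__printf(2, 3) is the kernel's wrapper for __attribute__((format(printf, 2, 3))): parameter 2 is the format string and argument checking starts at parameter 3, so mismatched callers now trigger -Wformat at compile time. A reduced sketch of what the annotation buys (report and demo are hypothetical):

	static __printf(2, 3)
	void report(int res, const char *fmt, ...);

	static void demo(void)
	{
		report(0, "count: %d", 42);	/* fine */
		report(0, "count: %s", 42);	/* now warns at compile time */
	}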
@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
 		die("Segment relocations found but --realmode not specified\n");
 
 	/* Order the relocations for more efficient processing */
-	sort_relocs(&relocs16);
 	sort_relocs(&relocs32);
 #if ELF_BITS == 64
 	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+#else
+	sort_relocs(&relocs16);
 #endif
 
 	/* Print the relocations */
...
@@ -983,7 +983,11 @@ static int __init i8042_pnp_init(void)
 #if defined(__ia64__)
 	return -ENODEV;
 #else
-	pr_info("PNP: No PS/2 controller found. Probing ports directly.\n");
+	pr_info("PNP: No PS/2 controller found.\n");
+	if (x86_platform.legacy.i8042 !=
+			X86_LEGACY_I8042_EXPECTED_PRESENT)
+		return -ENODEV;
+	pr_info("Probing ports directly.\n");
 	return 0;
 #endif
 }
@@ -1070,8 +1074,8 @@ static int __init i8042_platform_init(void)
 #ifdef CONFIG_X86
 	u8 a20_on = 0xdf;
 
-	/* Just return if pre-detection shows no i8042 controller exist */
-	if (!x86_platform.i8042_detect())
+	/* Just return if platform does not have i8042 controller */
+	if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
 		return -ENODEV;
 #endif
...