Commit a930dc45 authored by Borislav Petkov

x86/asm: Cleanup prefetch primitives

This is based on a patch originally by hpa.

With the current improvements to the alternatives, we can simply use %P1
as a mem8 operand constraint and rely on the toolchain to generate the
proper instruction sizes. For example, on 32-bit, where we use an empty
old instruction we get:

  apply_alternatives: feat: 6*32+8, old: (c104648b, len: 4), repl: (c195566c, len: 4)
  c104648b: alt_insn: 90 90 90 90
  c195566c: rpl_insn: 0f 0d 4b 5c

  ...

  apply_alternatives: feat: 6*32+8, old: (c18e09b4, len: 3), repl: (c1955948, len: 3)
  c18e09b4: alt_insn: 90 90 90
  c1955948: rpl_insn: 0f 0d 08

  ...

  apply_alternatives: feat: 6*32+8, old: (c1190cf9, len: 7), repl: (c1955a79, len: 7)
  c1190cf9: alt_insn: 90 90 90 90 90 90 90
  c1955a79: rpl_insn: 0f 0d 0d a0 d4 85 c1

all with the proper padding done depending on the size of the
replacement instruction the compiler generates.
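
For illustration only, a minimal user-space sketch of the same trick (assuming GCC or clang on x86-64; my_prefetch_nta() is a made-up helper, not the kernel's alternative_input() macro): handing the compiler a real "m" operand and referencing it as %P1 leaves the choice of addressing mode, and thus the instruction length, to the toolchain.

  /* Hypothetical sketch, not kernel code.  The dummy "i" (0) input is only
   * there so the memory operand lands at %1, matching the %P1 references
   * used above. */
  static inline void my_prefetch_nta(const void *p)
  {
          /* "m" (*(const char *)p) makes p a genuine memory operand; %P1
           * prints it as a bare memory reference, so the assembler may emit
           * e.g. "prefetchnta (%rax)" or "prefetchnta 0x5c(%rbx)" and size
           * the encoding accordingly. */
          asm volatile("prefetchnta %P1" : : "i" (0), "m" (*(const char *)p));
  }

  int main(void)
  {
          char buf[64];

          my_prefetch_nta(buf);
          return 0;
  }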
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: H. Peter Anvin <hpa@linux.intel.com>
Parent c70e1b47
@@ -91,7 +91,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 {
 	volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
-	alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
+	alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
 		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
 		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 }
...
@@ -761,10 +761,10 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_SPINLOCK_PREFETCH

 #ifdef CONFIG_X86_32
-# define BASE_PREFETCH		ASM_NOP4
+# define BASE_PREFETCH		""
 # define ARCH_HAS_PREFETCH
 #else
-# define BASE_PREFETCH		"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 %P1"
 #endif

 /*
@@ -775,10 +775,9 @@ extern char ignore_fpu_irq;
  */
 static inline void prefetch(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchnta (%1)",
+	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
 			  X86_FEATURE_XMM,
-			  "r" (x));
+			  "m" (*(const char *)x));
 }

 /*
@@ -788,10 +787,9 @@ static inline void prefetch(const void *x)
  */
 static inline void prefetchw(const void *x)
 {
-	alternative_input(BASE_PREFETCH,
-			  "prefetchw (%1)",
-			  X86_FEATURE_3DNOW,
-			  "r" (x));
+	alternative_input(BASE_PREFETCH, "prefetchw %P1",
+			  X86_FEATURE_3DNOWPREFETCH,
+			  "m" (*(const char *)x));
 }

 static inline void spin_lock_prefetch(const void *x)
...
@@ -711,6 +711,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+	/* 3DNow or LM implies PREFETCHW */
+	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
+		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 }

 #ifdef CONFIG_X86_32
...