Commit 6619a8fb authored by H. Peter Anvin, committed by Thomas Gleixner

x86: Create clflush() inline, remove hardcoded wbinvd

Create an inline function for clflush(), with the proper arguments,
and use it instead of hard-coding the instruction.

This also removes one instance of hard-coded wbinvd, based on a patch
by Glauber de Oliveira Costa.

[ tglx: arch/x86 adaptation ]

Cc: Andi Kleen <andi@firstfloor.org>
Cc: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 9689ba8a
...@@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr) ...@@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr)
{ {
/* a single tce can't cross a cache line */ /* a single tce can't cross a cache line */
if (cpu_has_clflush) if (cpu_has_clflush)
asm volatile("clflush (%0)" :: "r" (tceaddr)); clflush(tceaddr);
else else
asm volatile("wbinvd":::"memory"); wbinvd();
} }
void tce_build(struct iommu_table *tbl, unsigned long index, void tce_build(struct iommu_table *tbl, unsigned long index,
......
...@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot, ...@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
static void cache_flush_page(struct page *p) static void cache_flush_page(struct page *p)
{ {
unsigned long adr = (unsigned long)page_address(p); void *adr = page_address(p);
int i; int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
asm volatile("clflush (%0)" :: "r" (adr + i)); clflush(adr+i);
} }
static void flush_kernel_map(void *arg) static void flush_kernel_map(void *arg)
......
...@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr) ...@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr)
{ {
int i; int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
asm volatile("clflush (%0)" :: "r" (adr + i)); clflush(adr+i);
} }
static void flush_kernel_map(void *arg) static void flush_kernel_map(void *arg)
......
...@@ -221,7 +221,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) ...@@ -221,7 +221,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
SetPageReserved(virt_to_page((char *)page)); SetPageReserved(virt_to_page((char *)page));
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
asm volatile("clflush %0" : : "m" (*(char *)(page+offset))); clflush((char *)page+offset);
efficeon_private.l1_table[index] = page; efficeon_private.l1_table[index] = page;
...@@ -268,15 +268,16 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t ...@@ -268,15 +268,16 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
*page = insert; *page = insert;
/* clflush is slow, so don't clflush until we have to */ /* clflush is slow, so don't clflush until we have to */
if ( last_page && if (last_page &&
((unsigned long)page^(unsigned long)last_page) & clflush_mask ) (((unsigned long)page^(unsigned long)last_page) &
asm volatile("clflush %0" : : "m" (*last_page)); clflush_mask))
clflush(last_page);
last_page = page; last_page = page;
} }
if ( last_page ) if ( last_page )
asm volatile("clflush %0" : : "m" (*last_page)); clflush(last_page);
agp_bridge->driver->tlb_flush(mem); agp_bridge->driver->tlb_flush(mem);
return 0; return 0;
......
...@@ -161,6 +161,10 @@ static inline void native_wbinvd(void) ...@@ -161,6 +161,10 @@ static inline void native_wbinvd(void)
asm volatile("wbinvd": : :"memory"); asm volatile("wbinvd": : :"memory");
} }
/*
 * Flush the cache line containing *__p from the cache hierarchy.
 *
 * The "+m" (read/write memory) operand tells the compiler that the
 * flushed location is both read and written by the asm, so accesses
 * to it are not reordered or elided across the clflush.
 */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h> #include <asm/paravirt.h>
......
...@@ -137,6 +137,11 @@ static inline void write_cr8(unsigned long val) ...@@ -137,6 +137,11 @@ static inline void write_cr8(unsigned long val)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
/*
 * Flush the cache line containing *__p from the cache hierarchy.
 *
 * The "+m" (read/write memory) operand tells the compiler that the
 * flushed location is both read and written by the asm, so accesses
 * to it are not reordered or elided across the clflush.
 */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
#define nop() __asm__ __volatile__ ("nop") #define nop() __asm__ __volatile__ ("nop")
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册