Commit c38116bb authored by Peter Zijlstra, committed by Ingo Molnar

x86/mm/cpa: Better use CLFLUSHOPT

Currently we issue an MFENCE before and after flushing a range. This
means that if we flush a bunch of single-page ranges -- like with the
cpa array -- we issue a whole bunch of superfluous MFENCEs.

Reorganize the code a little to avoid this.
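
For illustration, a minimal user-space sketch of the same idea, using the
_mm_clflushopt()/_mm_mfence() compiler intrinsics in place of the kernel's
clflushopt()/mb() (the helper names flush_range_opt and flush_pages_batched
are hypothetical, and the 64-byte line size is an assumption; the kernel
reads boot_cpu_data.x86_clflush_size): flushing N pages through a fenced
entry point costs 2*N MFENCEs, while fencing once around the whole batch
costs 2.

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

#define CACHELINE_SIZE 64	/* assumed; not probed from CPUID here */

/* Unordered flush of [vaddr, vaddr + size); the caller supplies the fences. */
static void flush_range_opt(void *vaddr, size_t size)
{
	uintptr_t p   = (uintptr_t)vaddr & ~(uintptr_t)(CACHELINE_SIZE - 1);
	uintptr_t end = (uintptr_t)vaddr + size;

	for (; p < end; p += CACHELINE_SIZE)
		_mm_clflushopt((void *)p);
}

/* One MFENCE pair brackets the whole batch instead of every page. */
static void flush_pages_batched(void **pages, size_t n, size_t page_size)
{
	_mm_mfence();				/* order against prior stores */
	for (size_t i = 0; i < n; i++)
		flush_range_opt(pages[i], page_size);
	_mm_mfence();				/* 2 fences total, not 2*n */
}

(Build with a CLFLUSHOPT-capable toolchain, e.g. gcc -mclflushopt.)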

[ mingo: capitalize instructions, tweak changelog and comments. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.626999883@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent fe0937b2
@@ -251,15 +251,7 @@ static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
  * Flushing functions
  */
 
-/**
- * clflush_cache_range - flush a cache range with clflush
- * @vaddr:	virtual start address
- * @size:	number of bytes to flush
- *
- * clflushopt is an unordered instruction which needs fencing with mfence or
- * sfence to avoid ordering issues.
- */
-void clflush_cache_range(void *vaddr, unsigned int size)
+static void clflush_cache_range_opt(void *vaddr, unsigned int size)
 {
 	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
 	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
@@ -268,11 +260,22 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 	if (p >= vend)
 		return;
 
-	mb();
-
 	for (; p < vend; p += clflush_size)
 		clflushopt(p);
+}
 
+/**
+ * clflush_cache_range - flush a cache range with clflush
+ * @vaddr:	virtual start address
+ * @size:	number of bytes to flush
+ *
+ * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
+ * SFENCE to avoid ordering issues.
+ */
+void clflush_cache_range(void *vaddr, unsigned int size)
+{
+	mb();
+	clflush_cache_range_opt(vaddr, size);
 	mb();
 }
 EXPORT_SYMBOL_GPL(clflush_cache_range);
@@ -333,6 +336,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 	if (!cache)
 		return;
 
+	mb();
 	for (i = 0; i < cpa->numpages; i++) {
 		unsigned long addr = __cpa_addr(cpa, i);
 		unsigned int level;
@@ -343,8 +347,9 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
 	}
+	mb();
 }
 
 static bool overlaps(unsigned long r1_start, unsigned long r1_end,
...