Commit 3b233e52 authored by Thomas Gleixner, committed by Ingo Molnar

x86: optimize clflush

clflush is sufficient to be issued on one CPU. The invalidation is
broadcast throughout the coherence domain.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent cd8ddf1a
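The property the patch relies on is that CLFLUSH evicts a cache line from every cache in the coherence domain, so the cache-flush loop only has to run on one CPU; only the TLB invalidation still needs a cross-CPU call. Below is a minimal sketch of such a cache-line flush loop. The helper names and the hard-coded line size are illustrative assumptions, not the kernel's verbatim clflush()/clflush_cache_range() implementation.

/*
 * Illustrative sketch only -- not the kernel's verbatim code.
 * Flush a byte range from the CPU caches one cache line at a time.
 * CLFLUSH invalidates the line in every cache of the coherence
 * domain, so issuing it on a single CPU is sufficient; no IPI is
 * needed for the cache-flush part.
 */
static inline void flush_one_line(volatile void *p)
{
	/* CLFLUSH takes a memory operand; the whole line containing p is flushed. */
	asm volatile("clflush %0" : "+m" (*(volatile char *)p));
}

static void flush_cache_range_sketch(void *addr, int size)
{
	int line = 64;	/* assumed cache-line size; the kernel queries it via CPUID */
	int i;

	for (i = 0; i < size; i += line)
		flush_one_line((char *)addr + i);
}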
@@ -64,35 +64,29 @@ static void cpa_flush_all(void)
 	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
 }
 
-struct clflush_data {
-	unsigned long addr;
-	int numpages;
-};
-
 static void __cpa_flush_range(void *arg)
 {
-	struct clflush_data *cld = arg;
-
 	/*
 	 * We could optimize that further and do individual per page
 	 * tlb invalidates for a low number of pages. Caveat: we must
 	 * flush the high aliases on 64bit as well.
 	 */
 	__flush_tlb_all();
-
-	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
 }
 
 static void cpa_flush_range(unsigned long addr, int numpages)
 {
-	struct clflush_data cld;
-
 	BUG_ON(irqs_disabled());
 
-	cld.addr = addr;
-	cld.numpages = numpages;
-
-	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
+	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
+
+	/*
+	 * We only need to flush on one CPU,
+	 * clflush is a MESI-coherent instruction that
+	 * will cause all other CPUs to flush the same
+	 * cachelines:
+	 */
+	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
 }
 
 /*