Commit 7a591cfe authored by David S. Miller

[SPARC64]: Avoid dcache-dirty page state management on sun4v.

It is totally wasted work, since we have no D-cache aliasing
issues on sun4v.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 2a3a5f5d
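For context on what is being skipped: on pre-sun4v chips the sparc64 code marks a page's flags with PG_dcache_dirty and records, above PG_dcache_cpu_shift (masked by PG_dcache_cpu_mask), which CPU's D-cache holds the dirty lines, so that update_mmu_cache() can later flush on exactly that CPU. The standalone sketch below models that bookkeeping and the new tlb_type != hypervisor guard; it is not kernel code, and the bit positions, the fake_page structure, and the helper functions are illustrative assumptions, not the kernel's actual definitions.

/* Standalone model of the sparc64 dcache-dirty bookkeeping that this
 * patch skips on sun4v.  Bit positions and helpers are illustrative
 * assumptions, not the kernel's real definitions.
 */
#include <stdio.h>

enum tlb_type { spitfire, cheetah, cheetah_plus, hypervisor };

/* Assumed layout: one dirty bit plus a small CPU-ID field in a flags word. */
#define PG_dcache_dirty      31
#define PG_dcache_cpu_shift  24UL
#define PG_dcache_cpu_mask   0x3fUL

struct fake_page {
        unsigned long flags;
};

static enum tlb_type tlb_type = hypervisor;     /* pretend we run on sun4v */

/* Record "CPU 'cpu' has dirty D-cache lines for this page". */
static void set_dcache_dirty(struct fake_page *page, int cpu)
{
        page->flags &= ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
        page->flags |= (1UL << PG_dcache_dirty) |
                       (((unsigned long)cpu & PG_dcache_cpu_mask)
                                << PG_dcache_cpu_shift);
}

static void clear_dcache_dirty(struct fake_page *page)
{
        page->flags &= ~(1UL << PG_dcache_dirty);
}

/* Simplified stand-in for flush_dcache_page_impl()/smp_flush_dcache_page_impl(). */
static void model_flush_dcache(struct fake_page *page, int cpu)
{
        (void)page;
        printf("flush D-cache for page on CPU %d\n", cpu);
}

/* Shape of the patched update_mmu_cache(): the whole dirty-tracking path
 * is skipped when tlb_type == hypervisor, since sun4v has no D-cache
 * aliasing to worry about.
 */
static void update_mmu_cache_model(struct fake_page *page)
{
        if (tlb_type != hypervisor &&
            (page->flags & (1UL << PG_dcache_dirty))) {
                int cpu = (page->flags >> PG_dcache_cpu_shift) &
                          PG_dcache_cpu_mask;

                model_flush_dcache(page, cpu);
                clear_dcache_dirty(page);
        }
        /* ...the rest (TLB/TSB insertion) would run unconditionally. */
}

int main(void)
{
        struct fake_page page = { .flags = 0 };

        set_dcache_dirty(&page, 2);

        update_mmu_cache_model(&page);  /* "sun4v": no flush, state ignored */

        tlb_type = cheetah_plus;        /* pretend sun4u instead */
        update_mmu_cache_model(&page);  /* now flushes on CPU 2 */
        return 0;
}

In the actual patch the same guard appears in three places: update_mmu_cache() and flush_dcache_page() bail out early, and tlb_batch_add() stops setting the dirty state in the first place, so no D-cache state is ever tracked on sun4v.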
@@ -188,8 +188,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
 #endif
 #endif
 
-__inline__ void flush_dcache_page_impl(struct page *page)
+inline void flush_dcache_page_impl(struct page *page)
 {
+        BUG_ON(tlb_type == hypervisor);
 #ifdef CONFIG_DEBUG_DCFLUSH
         atomic_inc(&dcpage_flushes);
 #endif
@@ -279,29 +280,31 @@ unsigned long _PAGE_SZBITS __read_mostly;
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
         struct mm_struct *mm;
-        struct page *page;
-        unsigned long pfn;
-        unsigned long pg_flags;
-
-        pfn = pte_pfn(pte);
-        if (pfn_valid(pfn) &&
-            (page = pfn_to_page(pfn), page_mapping(page)) &&
-            ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
-                int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
-                           PG_dcache_cpu_mask);
-                int this_cpu = get_cpu();
-
-                /* This is just to optimize away some function calls
-                 * in the SMP case.
-                 */
-                if (cpu == this_cpu)
-                        flush_dcache_page_impl(page);
-                else
-                        smp_flush_dcache_page_impl(page, cpu);
-
-                clear_dcache_dirty_cpu(page, cpu);
-
-                put_cpu();
+
+        if (tlb_type != hypervisor) {
+                unsigned long pfn = pte_pfn(pte);
+                unsigned long pg_flags;
+                struct page *page;
+
+                if (pfn_valid(pfn) &&
+                    (page = pfn_to_page(pfn), page_mapping(page)) &&
+                    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
+                        int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
+                                   PG_dcache_cpu_mask);
+                        int this_cpu = get_cpu();
+
+                        /* This is just to optimize away some function calls
+                         * in the SMP case.
+                         */
+                        if (cpu == this_cpu)
+                                flush_dcache_page_impl(page);
+                        else
+                                smp_flush_dcache_page_impl(page, cpu);
+
+                        clear_dcache_dirty_cpu(page, cpu);
+
+                        put_cpu();
+                }
         }
 
         mm = vma->vm_mm;
@@ -321,6 +324,9 @@ void flush_dcache_page(struct page *page)
         struct address_space *mapping;
         int this_cpu;
 
+        if (tlb_type == hypervisor)
+                return;
+
         /* Do not bother with the expensive D-cache flush if it
          * is merely the zero page. The 'bigcore' testcase in GDB
          * causes this case to run millions of times.
@@ -49,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
         if (pte_exec(orig))
                 vaddr |= 0x1UL;
 
-        if (pte_dirty(orig)) {
+        if (tlb_type != hypervisor &&
+            pte_dirty(orig)) {
                 unsigned long paddr, pfn = pte_pfn(orig);
                 struct address_space *mapping;
                 struct page *page;