diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 2f9b751878ba86bf6cc32cfdadd9b7847ad158d5..de65f66ea64e7538f4f7c431ca3800e86c152e5d 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -132,7 +132,6 @@ void mark_rodata_ro(void);
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
-	flush_dcache_page(page);
 	return page_address(page);
 }
 
@@ -144,7 +143,6 @@ static inline void kunmap(struct page *page)
 static inline void *kmap_atomic(struct page *page)
 {
 	pagefault_disable();
-	flush_dcache_page(page);
 	return page_address(page);
 }
 
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index c53fc63149e8312437fe4109e8070495ba39d90a..637fe031aa8476d743a8141ca9b549019985a0c6 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -29,7 +29,8 @@ struct page;
 void clear_page_asm(void *page);
 void copy_page_asm(void *to, void *from);
 #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
-#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		struct page *pg);
 
 /* #define CONFIG_PARISC_TMPALIAS */
 
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a72545554a3154c254eace0648a769d4df645b25..ac87a40502e6f6a0e11d21eaf6789259859ad092 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,6 +388,20 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *pg)
+{
+	/* Copy using kernel mapping.  No coherency is needed (all in
+	   kunmap) for the `to' page.  However, the `from' page needs to
+	   be flushed through a mapping equivalent to the user mapping
+	   before it can be accessed through the kernel mapping. */
+	preempt_disable();
+	flush_dcache_page_asm(__pa(vfrom), vaddr);
+	preempt_enable();
+	copy_page_asm(vto, vfrom);
+}
+EXPORT_SYMBOL(copy_user_page);
+
 void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long flags;
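
For context, copy_user_page() is normally reached through the generic
copy_user_highpage() helper in include/linux/highmem.h, which brackets the
copy with kmap_atomic()/kunmap_atomic().  The sketch below is an
approximation of that generic helper (exact details vary by kernel
version); it illustrates why the equivalent-alias flush now lives inside
copy_user_page() itself rather than in kmap_atomic():

/* Rough sketch of the generic caller (used when the architecture does
 * not define __HAVE_ARCH_COPY_USER_HIGHPAGE); not part of this patch. */
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);		/* no longer flushes on parisc */
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);	/* flushes the `from' alias */
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}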