提交 5472e862 编写于 作者: C Cong Wang 提交者: Cong Wang

arm: remove the second argument of k[un]map_atomic()

Signed-off-by: Cong Wang <amwang@redhat.com>
上级 1ec9c5dd
/*
 * Copy a user highmem page "from" into "to" via fa_copy_user_page().
 *
 * Both pages are pinned with atomic kernel mappings for the duration of
 * the copy.  kmap_atomic()/kunmap_atomic() no longer take a KM_USER*
 * slot argument: the atomic mappings stack, so the unmaps must be done
 * in reverse order of the maps (kfrom before kto).
 *
 * vaddr/vma are unused here; they are part of the cpu_user_fns
 * copy_user_highpage interface shared by all ARM variants.
 */
void fa_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	fa_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
/* /*
...@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from, ...@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
*/ */
void fa_clear_user_highpage(struct page *page, unsigned long vaddr) void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\ asm volatile("\
mov r1, %2 @ 1\n\ mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\ mov r2, #0 @ 1\n\
...@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32) : "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip", "lr"); : "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns fa_user_fns __initdata = { struct cpu_user_fns fa_user_fns __initdata = {
......
/*
 * Copy a user highmem page "from" into "to" via feroceon_copy_user_page().
 *
 * Both pages are pinned with atomic kernel mappings; the source page's
 * user-space alias is flushed with flush_cache_page() before the copy so
 * the kernel mapping reads up-to-date data.  kmap_atomic() no longer
 * takes a KM_USER* slot argument; unmaps happen in reverse map order.
 */
void feroceon_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	feroceon_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\ asm volatile ("\
mov r1, %2 \n\ mov r1, %2 \n\
mov r2, #0 \n\ mov r2, #0 \n\
...@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32) : "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns feroceon_user_fns __initdata = { struct cpu_user_fns feroceon_user_fns __initdata = {
......
/*
 * Copy a user highmem page "from" into "to" via v3_copy_user_page().
 *
 * Both pages are pinned with atomic kernel mappings for the copy.
 * kmap_atomic() no longer takes a KM_USER* slot argument; the unmaps
 * are performed in reverse order of the maps.
 */
void v3_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	v3_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
/* /*
...@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from, ...@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
*/ */
void v3_clear_user_highpage(struct page *page, unsigned long vaddr) void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\n\ asm volatile("\n\
mov r1, %2 @ 1\n\ mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\ mov r2, #0 @ 1\n\
...@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64) : "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr"); : "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns v3_user_fns __initdata = { struct cpu_user_fns v3_user_fns __initdata = {
......
...@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to) ...@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
void v4_mc_copy_user_highpage(struct page *to, struct page *from, void v4_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma) unsigned long vaddr, struct vm_area_struct *vma)
{ {
void *kto = kmap_atomic(to, KM_USER1); void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags)) if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from); __flush_dcache_page(page_mapping(from), from);
...@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_unlock(&minicache_lock); raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1); kunmap_atomic(kto);
} }
/* /*
...@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
*/ */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\ asm volatile("\
mov r1, %2 @ 1\n\ mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\ mov r2, #0 @ 1\n\
...@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64) : "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr"); : "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns v4_mc_user_fns __initdata = { struct cpu_user_fns v4_mc_user_fns __initdata = {
......
/*
 * Copy a user highmem page "from" into "to" via v4wb_copy_user_page().
 *
 * Both pages are pinned with atomic kernel mappings; the source page's
 * user-space alias is cleaned out of the (write-back) cache with
 * flush_cache_page() before copying so the kernel mapping sees current
 * data.  kmap_atomic() no longer takes a KM_USER* slot argument.
 */
void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
/* /*
...@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, ...@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
*/ */
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\ asm volatile("\
mov r1, %2 @ 1\n\ mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\ mov r2, #0 @ 1\n\
...@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64) : "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr"); : "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns v4wb_user_fns __initdata = { struct cpu_user_fns v4wb_user_fns __initdata = {
......
/*
 * Copy a user highmem page "from" into "to" via v4wt_copy_user_page().
 *
 * Both pages are pinned with atomic kernel mappings for the copy; no
 * cache flush is needed here (write-through cache, per the v4wt
 * variant).  kmap_atomic() no longer takes a KM_USER* slot argument;
 * unmaps happen in reverse order of the maps.
 */
void v4wt_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	v4wt_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
/* /*
...@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, ...@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
*/ */
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\ asm volatile("\
mov r1, %2 @ 1\n\ mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\ mov r2, #0 @ 1\n\
...@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64) : "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr"); : "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns v4wt_user_fns __initdata = { struct cpu_user_fns v4wt_user_fns __initdata = {
......
/*
 * Copy a user highmem page on a non-aliasing (VIPT non-aliasing / PIPT)
 * ARMv6 cache: a plain copy_page() through atomic kernel mappings is
 * sufficient, with no cache maintenance.
 *
 * Note the map order here is kfrom then kto (the reverse of the other
 * variants in this commit); unmaps are still done in reverse map order.
 * kmap_atomic() no longer takes a KM_USER* slot argument.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}
/* /*
...@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, ...@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
*/ */
/*
 * Clear a user highmem page on a non-aliasing ARMv6 cache: map the page
 * atomically, zero it with clear_page(), and unmap it.  kmap_atomic()
 * no longer takes a KM_USER* slot argument.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);
	kunmap_atomic(kaddr);
}
/* /*
......
/*
 * Copy a user highmem page "from" into "to" via xsc3_mc_copy_user_page().
 *
 * Both pages are pinned with atomic kernel mappings; the source page's
 * user-space alias is flushed with flush_cache_page() before the copy
 * so the kernel mapping reads up-to-date data.  kmap_atomic() no longer
 * takes a KM_USER* slot argument; unmaps happen in reverse map order.
 */
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}
/* /*
...@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
*/ */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\ asm volatile ("\
mov r1, %2 \n\ mov r1, %2 \n\
mov r2, #0 \n\ mov r2, #0 \n\
...@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32) : "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3"); : "r1", "r2", "r3");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns xsc3_mc_user_fns __initdata = { struct cpu_user_fns xsc3_mc_user_fns __initdata = {
......
...@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to) ...@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
void xscale_mc_copy_user_highpage(struct page *to, struct page *from, void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma) unsigned long vaddr, struct vm_area_struct *vma)
{ {
void *kto = kmap_atomic(to, KM_USER1); void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags)) if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from); __flush_dcache_page(page_mapping(from), from);
...@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_unlock(&minicache_lock); raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1); kunmap_atomic(kto);
} }
/* /*
...@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
void void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{ {
void *ptr, *kaddr = kmap_atomic(page, KM_USER0); void *ptr, *kaddr = kmap_atomic(page);
asm volatile( asm volatile(
"mov r1, %2 \n\ "mov r1, %2 \n\
mov r2, #0 \n\ mov r2, #0 \n\
...@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) ...@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr) : "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32) : "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip"); : "r1", "r2", "r3", "ip");
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
struct cpu_user_fns xscale_mc_user_fns __initdata = { struct cpu_user_fns xscale_mc_user_fns __initdata = {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册