Commit 144cf864 authored by Cong Wang

frv: remove the second parameter of kmap_atomic_primary()

All callers of kmap_atomic_primary() use __KM_CACHE, so it can be removed
safely, and __kmap_atomic_primary() only checks whether 'type' is
__KM_CACHE, so 'type' can be changed to a boolean as well.

Ditto for kunmap_atomic_primary()/__kunmap_atomic_primary().
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Cong Wang <amwang@redhat.com>
Parent 906adea1
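
For context, a hedged caller-level sketch of what the change means, condensed from the flush_dcache_page() hunk further below; the wrapper function name is invented for illustration, and only the kmap/kunmap/frv_dcache_writeback calls come from the patch itself:

/*
 * Minimal sketch of the caller-visible difference, assuming the usual FRV
 * kernel headers (<asm/highmem.h>, <asm/cacheflush.h>) are in scope.
 * The wrapper name writeback_one_page() is hypothetical.
 */
static void writeback_one_page(struct page *page)
{
        void *vaddr;

        /*
         * Before this patch, every caller had to pass the km_type, and all
         * of them passed __KM_CACHE:
         *
         *      vaddr = kmap_atomic_primary(page, __KM_CACHE);
         *      frv_dcache_writeback(...);
         *      kunmap_atomic_primary(vaddr, __KM_CACHE);
         */

        /* After this patch, __KM_CACHE is implied and the argument is gone: */
        vaddr = kmap_atomic_primary(page);
        frv_dcache_writeback((unsigned long) vaddr,
                             (unsigned long) vaddr + PAGE_SIZE);
        kunmap_atomic_primary(vaddr);
}
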
@@ -76,15 +76,16 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 #ifndef __ASSEMBLY__
 
-#define __kmap_atomic_primary(type, paddr, ampr) \
+#define __kmap_atomic_primary(cached, paddr, ampr) \
 ({ \
         unsigned long damlr, dampr; \
         \
         dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
         \
-        if (type != __KM_CACHE) \
+        if (!cached) \
                 asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
         else \
+                /* cache flush page attachment point */ \
                 asm volatile("movgs %0,iampr"#ampr"\n" \
                              "movgs %0,dampr"#ampr"\n" \
                              :: "r"(dampr) : "memory" \
@@ -112,29 +113,20 @@ extern struct page *kmap_atomic_to_page(void *ptr);
         (void *) damlr; \
 })
 
-static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page)
 {
         unsigned long paddr;
 
         pagefault_disable();
         paddr = page_to_phys(page);
-        switch (type) {
-        case 0:         return __kmap_atomic_primary(0, paddr, 2);
-        case 1:         return __kmap_atomic_primary(1, paddr, 3);
-        case 2:         return __kmap_atomic_primary(2, paddr, 4);
-        case 3:         return __kmap_atomic_primary(3, paddr, 5);
-
-        default:
-                BUG();
-                return NULL;
-        }
+        return __kmap_atomic_primary(1, paddr, 2);
 }
 
-#define __kunmap_atomic_primary(type, ampr) \
+#define __kunmap_atomic_primary(cached, ampr) \
 do { \
         asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \
-        if (type == __KM_CACHE) \
+        if (cached) \
                 asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \
 } while(0)
@@ -143,17 +135,9 @@ do { \
         asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
 } while(0)
 
-static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr)
 {
-        switch (type) {
-        case 0:         __kunmap_atomic_primary(0, 2); break;
-        case 1:         __kunmap_atomic_primary(1, 3); break;
-        case 2:         __kunmap_atomic_primary(2, 4); break;
-        case 3:         __kunmap_atomic_primary(3, 5); break;
-
-        default:
-                BUG();
-        }
+        __kunmap_atomic_primary(1, 2);
         pagefault_enable();
 }
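
For readability, this is how the two inline helpers read once the hunks above are applied; the bodies are assembled from the new (right-hand) side of the diff, with only the layout being editorial:

static inline void *kmap_atomic_primary(struct page *page)
{
        unsigned long paddr;

        pagefault_disable();
        paddr = page_to_phys(page);
        /* '1' (cached) programs both iampr2 and dampr2, i.e. the cache
         * flush page attachment point. */
        return __kmap_atomic_primary(1, paddr, 2);
}

static inline void kunmap_atomic_primary(void *kvaddr)
{
        /* Tear down the same slot; 'cached' again selects the iampr clear. */
        __kunmap_atomic_primary(1, 2);
        pagefault_enable();
}
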
@@ -62,14 +62,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
         dampr2 = __get_DAMPR(2);
 
         for (i = 0; i < nents; i++) {
-                vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
+                vaddr = kmap_atomic_primary(sg_page(&sg[i]));
 
                 frv_dcache_writeback((unsigned long) vaddr,
                                      (unsigned long) vaddr + PAGE_SIZE);
         }
 
-        kunmap_atomic_primary(vaddr, __KM_CACHE);
+        kunmap_atomic_primary(vaddr);
 
         if (dampr2) {
                 __set_DAMPR(2, dampr2);
                 __set_IAMPR(2, dampr2);
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
         dampr2 = __get_DAMPR(2);
 
-        vaddr = kmap_atomic_primary(page, __KM_CACHE);
+        vaddr = kmap_atomic_primary(page);
 
         frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-        kunmap_atomic_primary(vaddr, __KM_CACHE);
+        kunmap_atomic_primary(vaddr);
 
         if (dampr2) {
                 __set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
         dampr2 = __get_DAMPR(2);
 
-        vaddr = kmap_atomic_primary(page, __KM_CACHE);
+        vaddr = kmap_atomic_primary(page);
 
         start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
         frv_cache_wback_inv(start, start + len);
 
-        kunmap_atomic_primary(vaddr, __KM_CACHE);
+        kunmap_atomic_primary(vaddr);
 
         if (dampr2) {
                 __set_DAMPR(2, dampr2);
@@ -50,11 +50,11 @@ void *kmap_atomic(struct page *page)
         /*
          * The first 4 primary maps are reserved for architecture code
          */
-        case 0:         return __kmap_atomic_primary(4, paddr, 6);
-        case 1:         return __kmap_atomic_primary(5, paddr, 7);
-        case 2:         return __kmap_atomic_primary(6, paddr, 8);
-        case 3:         return __kmap_atomic_primary(7, paddr, 9);
-        case 4:         return __kmap_atomic_primary(8, paddr, 10);
+        case 0:         return __kmap_atomic_primary(0, paddr, 6);
+        case 1:         return __kmap_atomic_primary(0, paddr, 7);
+        case 2:         return __kmap_atomic_primary(0, paddr, 8);
+        case 3:         return __kmap_atomic_primary(0, paddr, 9);
+        case 4:         return __kmap_atomic_primary(0, paddr, 10);
         case 5 ... 5 + NR_TLB_LINES - 1:
                 return __kmap_atomic_secondary(type - 5, paddr);
@@ -70,11 +70,11 @@ void __kunmap_atomic(void *kvaddr)
 {
         int type = kmap_atomic_idx();
 
         switch (type) {
-        case 0:         __kunmap_atomic_primary(4, 6);  break;
-        case 1:         __kunmap_atomic_primary(5, 7);  break;
-        case 2:         __kunmap_atomic_primary(6, 8);  break;
-        case 3:         __kunmap_atomic_primary(7, 9);  break;
-        case 4:         __kunmap_atomic_primary(8, 10); break;
+        case 0:         __kunmap_atomic_primary(0, 6);  break;
+        case 1:         __kunmap_atomic_primary(0, 7);  break;
+        case 2:         __kunmap_atomic_primary(0, 8);  break;
+        case 3:         __kunmap_atomic_primary(0, 9);  break;
+        case 4:         __kunmap_atomic_primary(0, 10); break;
         case 5 ... 5 + NR_TLB_LINES - 1:
                 __kunmap_atomic_secondary(type - 5, kvaddr);