提交 d38153f9 编写于 作者: N Nicholas Piggin 提交者: Michael Ellerman

powerpc/64s/radix: ioremap use ioremap_page_range

Radix can use ioremap_page_range for ioremap, after slab is available.
This makes it possible to enable huge ioremap mapping support.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
上级 a72808a7
...@@ -266,6 +266,9 @@ extern void radix__vmemmap_remove_mapping(unsigned long start, ...@@ -266,6 +266,9 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa, extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz); pgprot_t flags, unsigned int psz);
extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
unsigned long size, pgprot_t prot, int nid);
static inline unsigned long radix__get_tree_size(void) static inline unsigned long radix__get_tree_size(void)
{ {
unsigned long rts_field; unsigned long rts_field;
......
...@@ -447,3 +447,24 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, ...@@ -447,3 +447,24 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
return true; return true;
} }
/*
 * Map the physical range [pa, pa + size) at kernel virtual address ea
 * with protection bits prot.  Dispatches to the radix implementation
 * when the radix MMU is active; otherwise establishes the mapping one
 * page at a time via map_kernel_page().
 *
 * Returns 0 on success or the error from the first failed page mapping.
 * On failure, pages already mapped are torn down when the slab
 * allocator is available; otherwise cleanup is skipped with a warning.
 */
int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
{
	unsigned long off;
	int err;

	if (radix_enabled())
		return radix__ioremap_range(ea, pa, size, prot, nid);

	for (off = 0; off < size; off += PAGE_SIZE) {
		err = map_kernel_page(ea + off, pa + off, prot);
		if (!err)
			continue;

		if (slab_is_available())
			unmap_kernel_range(ea, size);
		else
			WARN_ON_ONCE(1); /* Should clean up */
		return err;
	}

	return 0;
}
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "radix-mmu: " fmt #define pr_fmt(fmt) "radix-mmu: " fmt
#include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/memblock.h> #include <linux/memblock.h>
...@@ -1122,3 +1123,23 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, ...@@ -1122,3 +1123,23 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
set_pte_at(mm, addr, ptep, pte); set_pte_at(mm, addr, ptep, pte);
} }
/*
 * Radix ioremap backend.  Once the slab allocator is up we can use the
 * generic ioremap_page_range() for the whole range; before that, fall
 * back to mapping individual pages with map_kernel_page().
 *
 * Returns 0 on success or a negative error.  On ioremap_page_range()
 * failure the partially-built mapping is unmapped; in the early-boot
 * fallback path cleanup is skipped with a warning.
 */
int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
			 pgprot_t prot, int nid)
{
	unsigned long off;
	int err;

	if (unlikely(!slab_is_available())) {
		/* Early boot: vmalloc infrastructure not ready yet. */
		for (off = 0; off < size; off += PAGE_SIZE) {
			err = map_kernel_page(ea + off, pa + off, prot);
			if (WARN_ON_ONCE(err)) /* Should clean up */
				return err;
		}
		return 0;
	}

	err = ioremap_page_range(ea, ea + size, pa, prot);
	if (err)
		unmap_kernel_range(ea, size);
	return err;
}
...@@ -108,7 +108,7 @@ unsigned long ioremap_bot; ...@@ -108,7 +108,7 @@ unsigned long ioremap_bot;
unsigned long ioremap_bot = IOREMAP_BASE; unsigned long ioremap_bot = IOREMAP_BASE;
#endif #endif
static int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid) int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
{ {
unsigned long i; unsigned long i;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册