提交 8436965e 编写于 作者: C Christoph Hellwig 提交者: Yang Yingliang

mm: remove map_vm_range

mainline inclusion
from mainline-v5.8-rc1
commit ed1f324c
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

Add an adjustment for map_vm_area in the DMA mapping code, and don't
remove it in ceph_common.

---------------------------

Switch all callers to map_kernel_range, which is symmetric to the unmap
side (as well as the _noflush versions).
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-17-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Rui Xiang <rui.xiang@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 6c9417b7
...@@ -223,7 +223,7 @@ Here are the routines, one by one: ...@@ -223,7 +223,7 @@ Here are the routines, one by one:
there will be no entries in the cache for the kernel address there will be no entries in the cache for the kernel address
space for virtual addresses in the range 'start' to 'end-1'. space for virtual addresses in the range 'start' to 'end-1'.
The first of these two routines is invoked after map_vm_area() The first of these two routines is invoked after map_kernel_range()
has installed the page table entries. The second is invoked has installed the page table entries. The second is invoked
before unmap_kernel_range() deletes the page table entries. before unmap_kernel_range() deletes the page table entries.
......
...@@ -140,11 +140,11 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, ...@@ -140,11 +140,11 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page **pages);
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size, extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages); pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else #else
...@@ -154,14 +154,12 @@ map_kernel_range_noflush(unsigned long start, unsigned long size, ...@@ -154,14 +154,12 @@ map_kernel_range_noflush(unsigned long start, unsigned long size,
{ {
return size >> PAGE_SHIFT; return size >> PAGE_SHIFT;
} }
#define map_kernel_range map_kernel_range_noflush
static inline void static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size) unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{ {
} }
static inline void #define unmap_kernel_range unmap_kernel_range_noflush
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif #endif
/* Allocate/destroy a 'vmalloc' VM area. */ /* Allocate/destroy a 'vmalloc' VM area. */
......
...@@ -255,7 +255,8 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages, ...@@ -255,7 +255,8 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
if (!area) if (!area)
return NULL; return NULL;
if (map_vm_area(area, prot, pages)) { if (map_kernel_range((unsigned long)area->addr, size, prot,
pages) < 0) {
vunmap(area->addr); vunmap(area->addr);
return NULL; return NULL;
} }
......
...@@ -263,8 +263,8 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size, ...@@ -263,8 +263,8 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
return 0; return 0;
} }
static int map_kernel_range(unsigned long start, unsigned long size, int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
pgprot_t prot, struct page **pages) struct page **pages)
{ {
int ret; int ret;
...@@ -1908,16 +1908,6 @@ void unmap_kernel_range(unsigned long addr, unsigned long size) ...@@ -1908,16 +1908,6 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
flush_tlb_kernel_range(addr, end); flush_tlb_kernel_range(addr, end);
} }
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
unsigned long addr = (unsigned long)area->addr;
int err;
err = map_kernel_range(addr, get_vm_area_size(area), prot, pages);
return err > 0 ? 0 : err;
}
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
unsigned long flags, const void *caller) unsigned long flags, const void *caller)
{ {
...@@ -2202,7 +2192,8 @@ void *vmap(struct page **pages, unsigned int count, ...@@ -2202,7 +2192,8 @@ void *vmap(struct page **pages, unsigned int count,
if (!area) if (!area)
return NULL; return NULL;
if (map_vm_area(area, prot, pages)) { if (map_kernel_range((unsigned long)area->addr, size, prot,
pages) < 0) {
vunmap(area->addr); vunmap(area->addr);
return NULL; return NULL;
} }
...@@ -2265,8 +2256,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, ...@@ -2265,8 +2256,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
cond_resched(); cond_resched();
} }
if (map_vm_area(area, prot, pages)) if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
prot, pages) < 0)
goto fail; goto fail;
return area->addr; return area->addr;
fail: fail:
......
...@@ -1156,7 +1156,9 @@ static inline void __zs_cpu_down(struct mapping_area *area) ...@@ -1156,7 +1156,9 @@ static inline void __zs_cpu_down(struct mapping_area *area)
static inline void *__zs_map_object(struct mapping_area *area, static inline void *__zs_map_object(struct mapping_area *area,
struct page *pages[2], int off, int size) struct page *pages[2], int off, int size)
{ {
BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); unsigned long addr = (unsigned long)area->vm->addr;
BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
area->vm_addr = area->vm->addr; area->vm_addr = area->vm->addr;
return area->vm_addr + off; return area->vm_addr + off;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册