Commit 7b1bca35 authored by Wang Wensheng, committed by Zheng Zengkai

memory: introduce do_mm_populate

ascend inclusion
category: Feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NDAW
CVE: NA

-------------------

do_mmap/mmap_region/__mm_populate can only operate on the current
process. The share pool now needs to create memory mappings in other
processes as well, so export new variants that take an explicit
mm_struct to select the target process. This does not change the
existing logic; the new paths are only used by the share pool.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent a3425d41
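To make the intended usage concrete, here is a minimal caller sketch (not part of this patch): it maps an anonymous region into another process's mm with __do_mmap_mm and then pre-faults it with do_mm_populate. The helper name sp_map_into and the flag choices are illustrative assumptions; the sketch also assumes __do_mmap_mm keeps do_mmap's locking contract (caller holds the target mm's mmap_lock), while do_mm_populate takes the lock itself, as the gup.c code below shows.

#include <linux/mm.h>
#include <linux/mman.h>

/* Illustrative helper, not part of this patch. */
static unsigned long sp_map_into(struct mm_struct *mm, unsigned long len)
{
	unsigned long populate = 0;
	unsigned long addr;

	/* Assumed contract: like do_mmap, the caller holds mmap_lock. */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	addr = __do_mmap_mm(mm, NULL, 0, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_ANONYMOUS | MAP_POPULATE,
			    0, 0, &populate, NULL);
	mmap_write_unlock(mm);

	/* Fault the pages into the target mm, not current->mm. */
	if (!IS_ERR_VALUE(addr) && populate)
		do_mm_populate(mm, addr, populate, 0 /* ignore_errors */);

	return addr;
}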
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2611,6 +2611,10 @@ extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 	      struct list_head *uf);
 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
+extern unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file,
+		unsigned long addr, unsigned long len, unsigned long prot,
+		unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff,
+		unsigned long *populate, struct list_head *uf);
 
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
 			 int ignore_errors);
@@ -2619,8 +2623,15 @@ static inline void mm_populate(unsigned long addr, unsigned long len)
 	/* Ignore errors */
 	(void) __mm_populate(addr, len, 1);
 }
+extern int do_mm_populate(struct mm_struct *mm, unsigned long addr, unsigned long len,
+			  int ignore_errors);
 #else
 static inline void mm_populate(unsigned long addr, unsigned long len) {}
+static inline int do_mm_populate(struct mm_struct *mm, unsigned long addr, unsigned long len,
+				 int ignore_errors)
+{
+	return -EPERM;
+}
 #endif
 
 /* These take the mm semaphore themselves */
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1507,15 +1507,12 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 }
 
 /*
- * __mm_populate - populate and/or mlock pages within a range of address space.
- *
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_lock must not be held.
+ * do_mm_populate - populate and/or mlock pages within a range of
+ * address space for the specified mm_struct.
  */
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+int do_mm_populate(struct mm_struct *mm, unsigned long start, unsigned long len,
+		   int ignore_errors)
 {
-	struct mm_struct *mm = current->mm;
 	unsigned long end, nstart, nend;
 	struct vm_area_struct *vma = NULL;
 	int locked = 0;
@@ -1565,6 +1562,19 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 	mmap_read_unlock(mm);
 	return ret;	/* 0 or negative error code */
 }
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_lock must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+	return do_mm_populate(current->mm, start, len, ignore_errors);
+}
+
 #else /* CONFIG_MMU */
 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		unsigned long nr_pages, struct page **pages,
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1399,12 +1399,17 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 	return true;
 }
 
-static inline unsigned long
-__do_mmap(struct file *file, unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags, vm_flags_t vm_flags,
-	unsigned long pgoff, unsigned long *populate, struct list_head *uf)
+static unsigned long __mmap_region(struct mm_struct *mm,
+		struct file *file, unsigned long addr,
+		unsigned long len, vm_flags_t vm_flags,
+		unsigned long pgoff, struct list_head *uf);
+
+unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file,
+		unsigned long addr, unsigned long len,
+		unsigned long prot, unsigned long flags,
+		vm_flags_t vm_flags, unsigned long pgoff,
+		unsigned long *populate, struct list_head *uf)
 {
-	struct mm_struct *mm = current->mm;
 	int pkey = 0;
 
 	*populate = 0;
@@ -1587,14 +1592,22 @@ __do_mmap(struct file *file, unsigned long addr, unsigned long len,
 	if (flags & MAP_CHECKNODE)
 		set_vm_checknode(&vm_flags, flags);
 
-	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
+	addr = __mmap_region(mm, file, addr, len, vm_flags, pgoff, uf);
 	if (!IS_ERR_VALUE(addr) &&
 	    ((vm_flags & VM_LOCKED) ||
 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
 		*populate = len;
 	return addr;
 }
+EXPORT_SYMBOL(__do_mmap_mm);
+static inline unsigned long
+__do_mmap(struct file *file, unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags, vm_flags_t vm_flags,
+	unsigned long pgoff, unsigned long *populate, struct list_head *uf)
+{
+	return __do_mmap_mm(current->mm, file, addr, len, prot, flags, vm_flags, pgoff, populate, uf);
+}
 
 #ifdef CONFIG_USERSWAP
 /*
  * Check if pages between 'addr ~ addr+len' can be user swapped. If so, get
@@ -1955,11 +1968,11 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
 }
 
-unsigned long mmap_region(struct file *file, unsigned long addr,
-		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
-		struct list_head *uf)
+static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
+		unsigned long addr, unsigned long len,
+		vm_flags_t vm_flags, unsigned long pgoff,
+		struct list_head *uf)
 {
-	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev, *merge;
 	int error;
 	struct rb_node **rb_link, *rb_parent;
@@ -2148,6 +2161,13 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return error;
 }
+
+unsigned long mmap_region(struct file *file, unsigned long addr,
+		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+		struct list_head *uf)
+{
+	return __mmap_region(current->mm, file, addr, len, vm_flags, pgoff, uf);
+}
 
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
 	/*
@@ -3209,7 +3229,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 	return __vm_munmap(addr, len, true);
 }
-
 /*
  * Emulation of deprecated remap_file_pages() syscall.
  */
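For completeness, a caller targeting another task would normally pin that task's mm before using these helpers. A hypothetical wrapper might look like the following sketch; sp_populate_other is an assumed name, not from the patch.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Hypothetical wrapper, not part of this patch. */
static int sp_populate_other(struct task_struct *tsk,
			     unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = get_task_mm(tsk);	/* takes a reference */
	int ret;

	if (!mm)
		return -ESRCH;	/* no mm: kernel thread or exiting task */

	ret = do_mm_populate(mm, addr, len, 0 /* ignore_errors */);
	mmput(mm);		/* drop the reference */
	return ret;
}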