提交 6c9c0745 编写于 作者: J Jiankang Chen 提交者: Xie XiuQi

mm: Add a flag MAP_VA32BIT for mmap

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA

-------------------

There is a bug in dvpp. The virtual addresses used by dvpp must
share the same high 16 bits.

Add a MAP_VA32BIT flag for mmap. mmap(..., MAP_VA32BIT) will return
a virtual address whose high 16 bits match.
Signed-off-by: NJiankang Chen <chenjiankang1@huawei.com>
Signed-off-by: NZhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: NChen Jun <chenjun102@huawei.com>
Signed-off-by: NLijun Fang <fanglijun3@huawei.com>
Reviewed-by: NKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: NYang Yingliang <yangyingliang@huawei.com>
上级 450e6336
...@@ -70,6 +70,8 @@ ...@@ -70,6 +70,8 @@
#define TASK_SIZE TASK_SIZE_64 #define TASK_SIZE TASK_SIZE_64
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
#define MMAP_TOP_4G_SIZE (0x100000000UL)
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
#define STACK_TOP_MAX TASK_SIZE_64 #define STACK_TOP_MAX TASK_SIZE_64
......
...@@ -215,6 +215,10 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -215,6 +215,10 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) { if (addr) {
addr = ALIGN(addr, huge_page_size(h)); addr = ALIGN(addr, huge_page_size(h));
if (!mmap_va32bit_check(addr, len, flags))
return -ENOMEM;
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma))) (!vma || addr + len <= vm_start_gap(vma)))
...@@ -227,6 +231,9 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -227,6 +231,9 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
info.high_limit = TASK_SIZE; info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0; info.align_offset = 0;
mmap_va32bit_set_limit(&info, flags);
return vm_unmapped_area(&info); return vm_unmapped_area(&info);
} }
#endif #endif
......
...@@ -2360,6 +2360,29 @@ struct vm_unmapped_area_info { ...@@ -2360,6 +2360,29 @@ struct vm_unmapped_area_info {
unsigned long align_offset; unsigned long align_offset;
}; };
#ifdef CONFIG_ARCH_ASCEND
/*
 * MAP_VA32BIT support (Ascend only): helpers that keep mappings requested
 * with MAP_VA32BIT inside the top 4G window of the task address space, and
 * keep ordinary mappings out of it.  Implemented in mm/mmap.c.
 */
extern int mmap_va32bit_check(unsigned long addr, unsigned long len,
				unsigned long flags);
extern void mmap_va32bit_set_limit(struct vm_unmapped_area_info *info,
				unsigned long flags);
#else
/* Non-Ascend stub: every (addr, len) hint is acceptable. */
static inline int mmap_va32bit_check(unsigned long addr, unsigned long len,
				unsigned long flags)
{
	return 1;
}
/* Non-Ascend stub: leave the unmapped-area search limits untouched. */
static inline void mmap_va32bit_set_limit(struct vm_unmapped_area_info *info,
				unsigned long flags)
{
}
#endif
extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ #define MAP_HUGETLB 0x40000 /* create a huge page mapping */
#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */ #define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
#define MAP_VA32BIT 0x0200
/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
......
...@@ -1841,6 +1841,38 @@ unsigned long mmap_region(struct file *file, unsigned long addr, ...@@ -1841,6 +1841,38 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
return error; return error;
} }
#ifdef CONFIG_ARCH_ASCEND
/*
 * Validate a fixed address hint against the reserved top 4G window.
 *
 * Returns 1 when the hint is acceptable: either the caller asked for the
 * 32-bit-style window via MAP_VA32BIT, or [addr, addr + len) stays clear
 * of the last MMAP_TOP_4G_SIZE bytes below TASK_SIZE.  Returns 0 when a
 * non-MAP_VA32BIT request would intrude into that window.
 */
int mmap_va32bit_check(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	unsigned long top_4g_base = TASK_SIZE - MMAP_TOP_4G_SIZE;

	/* MAP_VA32BIT mappings are allowed to live in the top window. */
	if (flags & MAP_VA32BIT)
		return 1;

	/* Reject ordinary hints that would overlap the reserved window. */
	if (addr < TASK_SIZE && addr + len > top_4g_base)
		return 0;

	return 1;
}
/*
 * Adjust the unmapped-area search limits for MAP_VA32BIT.
 *
 * MAP_VA32BIT requests are confined to the top 4G window
 * [TASK_SIZE - MMAP_TOP_4G_SIZE, TASK_SIZE); all other requests have
 * their high limit clamped below that window so they never land in it.
 * A NULL @info is tolerated as a no-op.
 */
void mmap_va32bit_set_limit(struct vm_unmapped_area_info *info,
		unsigned long flags)
{
	const unsigned long boundary = TASK_SIZE - MMAP_TOP_4G_SIZE;

	if (!info)
		return;

	if (flags & MAP_VA32BIT) {
		/* Search only inside the reserved top 4G window. */
		info->low_limit = boundary;
		info->high_limit = TASK_SIZE;
		return;
	}

	/* Keep ordinary mappings below the reserved window. */
	if (info->high_limit > boundary)
		info->high_limit = boundary;
}
#endif
unsigned long unmapped_area(struct vm_unmapped_area_info *info) unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{ {
/* /*
...@@ -2071,6 +2103,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, ...@@ -2071,6 +2103,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) { if (addr) {
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
if (!mmap_va32bit_check(addr, len, flags))
return -ENOMEM;
vma = find_vma_prev(mm, addr, &prev); vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) && (!vma || addr + len <= vm_start_gap(vma)) &&
...@@ -2083,6 +2119,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, ...@@ -2083,6 +2119,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base; info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE; info.high_limit = TASK_SIZE;
info.align_mask = 0; info.align_mask = 0;
mmap_va32bit_set_limit(&info, flags);
return vm_unmapped_area(&info); return vm_unmapped_area(&info);
} }
#endif #endif
...@@ -2112,6 +2151,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ...@@ -2112,6 +2151,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */ /* requesting a specific address */
if (addr) { if (addr) {
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
if (!mmap_va32bit_check(addr, len, flags))
return -ENOMEM;
vma = find_vma_prev(mm, addr, &prev); vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) && (!vma || addr + len <= vm_start_gap(vma)) &&
...@@ -2124,6 +2167,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ...@@ -2124,6 +2167,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base; info.high_limit = mm->mmap_base;
info.align_mask = 0; info.align_mask = 0;
mmap_va32bit_set_limit(&info, flags);
addr = vm_unmapped_area(&info); addr = vm_unmapped_area(&info);
/* /*
...@@ -2137,6 +2183,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ...@@ -2137,6 +2183,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.flags = 0; info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE; info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE; info.high_limit = TASK_SIZE;
mmap_va32bit_set_limit(&info, flags);
addr = vm_unmapped_area(&info); addr = vm_unmapped_area(&info);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册