diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 6e0083f26b4f832e6c2c6a162bffcf2a634190b1..9ef94ed45311a9402cea1f7151ce7ba337f98952 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -70,6 +70,8 @@
 #define TASK_SIZE		TASK_SIZE_64
 #endif /* CONFIG_COMPAT */
 
+#define MMAP_TOP_4G_SIZE	(0x100000000UL)	/* 4G window reserved at the top of TASK_SIZE */
+
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
 
 #define STACK_TOP_MAX		TASK_SIZE_64
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 2c75dc4b0b85596c6791037820a90b7c406e95c3..42ea465ed9d06142d0394393ef76dcd8d46e8381 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -215,6 +215,10 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
+
+		if (!mmap_va32bit_check(addr, len, flags))
+			return -ENOMEM;
+
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)))
@@ -227,6 +231,9 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	info.high_limit = TASK_SIZE;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
+
+	mmap_va32bit_set_limit(&info, flags);
+
 	return vm_unmapped_area(&info);
 }
 #endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7b322093ac4d83df1c6e4a4ed435f6fea5fb767..f3fd893314a92cc35fd3843d66432a64fc18b602 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2360,6 +2360,29 @@ struct vm_unmapped_area_info {
 	unsigned long align_mask;
 	unsigned long align_offset;
 };
 
+#ifdef CONFIG_ARCH_ASCEND
+
+extern int mmap_va32bit_check(unsigned long addr, unsigned long len,
+		unsigned long flags);
+extern void mmap_va32bit_set_limit(struct vm_unmapped_area_info *info,
+		unsigned long flags);
+
+#else
+
+static inline int mmap_va32bit_check(unsigned long addr, unsigned long len,
+		unsigned long flags)
+{
+	return 1;
+}
+
+static inline void mmap_va32bit_set_limit(struct vm_unmapped_area_info *info,
+		unsigned long flags)
+{
+}
+
+#endif
+
 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
 
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
index 653687d9771b9d0e824fe4b25eee079f7fcec1cc..84ae839fe7d11bdf62ea34959cf93811d35ccebe 100644
--- a/include/uapi/asm-generic/mman.h
+++ b/include/uapi/asm-generic/mman.h
@@ -14,6 +14,7 @@
 #define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x40000		/* create a huge page mapping */
 #define MAP_SYNC	0x80000		/* perform synchronous page faults for the mapping */
+#define MAP_VA32BIT	0x0200		/* place the mapping in the reserved top-4G window */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 0a260e102b24583f20eaca8a903870c47c6e17e0..2eeee2f9845e528e0e8a57103a897a330bb57a2a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1841,6 +1841,60 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return error;
 }
 
+#ifdef CONFIG_ARCH_ASCEND
+
+/**
+ * mmap_va32bit_check() - vet an explicit mmap address hint against the
+ * reserved top-4G window [TASK_SIZE - MMAP_TOP_4G_SIZE, TASK_SIZE).
+ * @addr: page-aligned address hint supplied by the caller
+ * @len: requested mapping length
+ * @flags: mmap flags; only MAP_VA32BIT is consulted
+ *
+ * Mappings that do not carry MAP_VA32BIT must not reach into the window.
+ * Return: 1 when the hint is acceptable, 0 when it must be rejected.
+ */
+int mmap_va32bit_check(unsigned long addr, unsigned long len,
+		unsigned long flags)
+{
+	const unsigned long divide = TASK_SIZE - MMAP_TOP_4G_SIZE;
+
+	/* MAP_VA32BIT requests are steered by mmap_va32bit_set_limit(). */
+	if (flags & MAP_VA32BIT)
+		return 1;
+
+	/* Hints at or above TASK_SIZE fail the callers' own validation. */
+	if (addr >= TASK_SIZE)
+		return 1;
+
+	return addr + len <= divide;
+}
+
+/**
+ * mmap_va32bit_set_limit() - constrain a VMA search so that MAP_VA32BIT
+ * allocations land inside the reserved top-4G window and all others below it.
+ * @info: search parameters for vm_unmapped_area(); may be NULL (no-op)
+ * @flags: mmap flags; only MAP_VA32BIT is consulted
+ */
+void mmap_va32bit_set_limit(struct vm_unmapped_area_info *info,
+		unsigned long flags)
+{
+	const unsigned long divide = TASK_SIZE - MMAP_TOP_4G_SIZE;
+
+	if (info == NULL)
+		return;
+
+	if (flags & MAP_VA32BIT) {
+		info->low_limit = divide;
+		info->high_limit = TASK_SIZE;
+	} else {
+		/* low_limit needs no change; just cap the upper bound. */
+		if (info->high_limit > divide)
+			info->high_limit = divide;
+	}
+}
+
+#endif
+
 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
 	/*
@@ -2071,6 +2125,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+
+		if (!mmap_va32bit_check(addr, len, flags))
+			return -ENOMEM;
+
 		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)) &&
@@ -2083,6 +2141,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
 	info.align_mask = 0;
+
+	mmap_va32bit_set_limit(&info, flags);
+
 	return vm_unmapped_area(&info);
 }
 #endif
@@ -2112,6 +2173,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
+
+		if (!mmap_va32bit_check(addr, len, flags))
+			return -ENOMEM;
+
 		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)) &&
@@ -2124,6 +2189,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = mm->mmap_base;
 	info.align_mask = 0;
+
+	mmap_va32bit_set_limit(&info, flags);
+
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -2137,6 +2205,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
 		info.high_limit = TASK_SIZE;
+
+		mmap_va32bit_set_limit(&info, flags);
+
 		addr = vm_unmapped_area(&info);
 	}
 