Commit 69ebe14f authored by Zhang Zekun, committed by Zhong Jinghua

vmalloc: Add config for Extend for hugepages mapping

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6HRGK

----------------------------------------

Use CONFIG_EXTEND_HUGEPAGE_MAPPING to isolate the code
introduced in commit a3425d41.

Besides, use tabs instead of spaces to match the
Kconfig formatting.
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
Parent a921a672
@@ -2081,7 +2081,7 @@ config ASCEND_CHARGE_MIGRATE_HUGEPAGES
 config ASCEND_SHARE_POOL
 	bool "Enable support for the Share Pool Memory"
 	default n
-	depends on HAVE_ARCH_HUGE_VMALLOC
+	depends on HAVE_ARCH_HUGE_VMALLOC && EXTEND_HUGEPAGE_MAPPING
 	select ARCH_USES_HIGH_VMA_FLAGS
 	help
 	  This feature allows multiple processes to share virtual memory both
......
@@ -1131,6 +1131,7 @@ CONFIG_PIN_MEMORY=y
 CONFIG_PID_RESERVE=y
 CONFIG_MEMORY_RELIABLE=y
 # CONFIG_CLEAR_FREELIST_PAGE is not set
+CONFIG_EXTEND_HUGEPAGE_MAPPING=y
 
 #
 # Data Access Monitoring
......
@@ -235,11 +235,13 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /* to align the pointer to the (next) PMD hugepage boundary */
 #define PMD_ALIGN(addr)		ALIGN(addr, PMD_SIZE)
 
 /* test whether an address (unsigned long or pointer) is aligned to PMD_SIZE */
 #define PMD_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PMD_SIZE)
+#endif
 
 #define lru_to_page(head)	(list_entry((head)->prev, struct page, lru))
......
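As a worked reference for the two macros added above, assuming arm64 with 4K base pages, where PMD_SIZE is 2MiB (1UL << 21); the addresses are illustrative:

	unsigned long addr = 0x200001UL;	/* one byte past a 2MiB boundary */

	PMD_ALIGN(addr);		/* 0x400000UL: rounded up to the next 2MiB boundary */
	PMD_ALIGNED(addr);		/* false: 0x200001 is not a multiple of PMD_SIZE */
	PMD_ALIGNED(0x400000UL);	/* true: exactly on a 2MiB boundary */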
@@ -27,7 +27,9 @@ struct notifier_block;	/* in notifier.h */
 #define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
 #define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
 #define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 #define VM_HUGE_PAGES		0x00001000	/* used for vmalloc hugepages */
+#endif
 #ifdef CONFIG_ASCEND_SHARE_POOL
 #define VM_SHAREPOOL		0x00002000	/* remapped to sharepool */
 #else
@@ -142,8 +144,11 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller);
 void *vmalloc_no_huge(unsigned long size);
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 extern void *vmalloc_hugepage(unsigned long size);
 extern void *vmalloc_hugepage_user(unsigned long size);
+#endif
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
@@ -160,6 +165,7 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff);
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 extern void *vmap_hugepage(struct page **pages, unsigned int count,
 				unsigned long flags, pgprot_t prot);
 extern int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma,
@@ -167,6 +173,7 @@ extern int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma,
 					unsigned long pgoff, unsigned long size);
 extern int remap_vmalloc_hugepage_range(struct vm_area_struct *vma,
 					void *addr, unsigned long pgoff);
+#endif
 
 /*
  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
......
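The guarded block above is the whole API surface the new option gates. Below is a minimal sketch of how a driver on this tree might combine the allocation and remap halves in an mmap handler; my_dev_mmap and HUGE_BUF_SIZE are hypothetical names, and real code would allocate the buffer once (e.g. at open time) and track it for freeing instead of leaking it per mapping:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define HUGE_BUF_SIZE	(4UL << PMD_SHIFT)	/* hypothetical: four 2MiB hugepages */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf;
	int ret;

	/* Hugepage-backed, user-mappable, virtually contiguous buffer. */
	buf = vmalloc_hugepage_user(HUGE_BUF_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Mirror the whole buffer into the caller's VMA at PMD granularity. */
	ret = remap_vmalloc_hugepage_range(vma, buf, vma->vm_pgoff);
	if (ret)
		vfree(buf);

	return ret;
}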
@@ -992,6 +992,12 @@ config CLEAR_FREELIST_PAGE
 	  To enable this feature, kernel parameter "clear_freelist" also
 	  needs to be added.
 
+config EXTEND_HUGEPAGE_MAPPING
+	bool "Extend for hugepages mapping"
+	depends on ARM64
+	default n
+	help
+	  Introduce vmalloc/vmap/remap interfaces that handle only hugepages.
 
 source "mm/damon/Kconfig"
......
@@ -578,6 +578,7 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 static int vmap_hugepages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
@@ -609,6 +610,7 @@ static int vmap_hugepages_range(unsigned long addr, unsigned long end,
 	return err;
 }
+#endif
 
 /**
  * map_kernel_range_noflush - map kernel VM area with the specified pages
@@ -2792,6 +2794,7 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * vmap_hugepage - map an array of huge pages into virtually contiguous space
  * @pages: array of huge page pointers (only the header)
@@ -2830,6 +2833,7 @@ void *vmap_hugepage(struct page **pages, unsigned int count,
 	return area->addr;
 }
 EXPORT_SYMBOL(vmap_hugepage);
+#endif
 
 #ifdef CONFIG_VMAP_PFN
 struct vmap_pfn_data {
@@ -3015,7 +3019,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		size_per_node = size;
 		if (node == NUMA_NO_NODE)
 			size_per_node /= num_online_nodes();
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
+		if (size_per_node >= PMD_SIZE || vm_flags & VM_HUGE_PAGES) {
+#else
 		if (size_per_node >= PMD_SIZE) {
+#endif
 			shift = PMD_SHIFT;
 			align = max(real_align, 1UL << shift);
 			size = ALIGN(real_size, 1UL << shift);
@@ -3050,8 +3058,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	return addr;
 
 fail:
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
+	/* User could specify VM_HUGE_PAGES to alloc only hugepages. */
+	if (shift > PAGE_SHIFT && !(vm_flags & VM_HUGE_PAGES)) {
+#else
 	if (shift > PAGE_SHIFT) {
+#endif
 		shift = PAGE_SHIFT;
 		align = real_align;
 		size = real_size;
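This fail path is the behavioural core of the new flag: a plain vmalloc() that was opportunistically upgraded to PMD mappings still retries with small pages on failure, whereas a VM_HUGE_PAGES allocation fails outright. Any fallback is therefore the caller's decision; a minimal sketch, assuming vmalloc_hugepage() requests VM_HUGE_PAGES internally as the comment above implies:

	void *buf;

	buf = vmalloc_hugepage(size);	/* hugepages or nothing, no small-page retry */
	if (!buf)
		buf = vmalloc(size);	/* explicit small-page fallback */
	if (!buf)
		return -ENOMEM;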
@@ -3261,6 +3273,7 @@ void *vmalloc_32_user(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * vmalloc_hugepage - allocate virtually contiguous hugetlb memory
  * @size: allocation size
@@ -3298,6 +3311,7 @@ void *vmalloc_hugepage_user(unsigned long size)
 			__builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_hugepage_user);
+#endif
 
 /*
  * small helper routine , copy contents to buf from addr.
@@ -3620,6 +3634,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * remap_vmalloc_hugepage_range_partial - map vmalloc hugepages
  *	to userspace
@@ -3706,6 +3721,7 @@ int remap_vmalloc_hugepage_range(struct vm_area_struct *vma, void *addr,
 					  vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_hugepage_range);
+#endif
 
 void free_vm_area(struct vm_struct *area)
 {
......
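For completeness, the vmap side for pages the caller already owns: per the vmap_hugepage() kerneldoc above, @pages carries only the head page of each hugepage. A hedged sketch in which the hugepages come from alloc_pages() with __GFP_COMP at PMD order; map_hugepages_demo and NR_HPAGES are made-up names, count is assumed to be in hugepages, and teardown of a successful mapping (vunmap() plus freeing the pages) is omitted:

#include <linux/gfp.h>
#include <linux/vmalloc.h>

#define NR_HPAGES	4	/* hypothetical: four PMD-sized hugepages */

static void *map_hugepages_demo(void)
{
	struct page *pages[NR_HPAGES];
	void *addr;
	int i;

	for (i = 0; i < NR_HPAGES; i++) {
		/* One compound page per slot; only the head pointer is kept. */
		pages[i] = alloc_pages(GFP_KERNEL | __GFP_COMP,
				       PMD_SHIFT - PAGE_SHIFT);
		if (!pages[i])
			goto out_free;
	}

	/* Virtually contiguous, PMD-mapped kernel view of the hugepages. */
	addr = vmap_hugepage(pages, NR_HPAGES, VM_MAP, PAGE_KERNEL);
	if (addr)
		return addr;

out_free:
	while (i--)
		__free_pages(pages[i], PMD_SHIFT - PAGE_SHIFT);
	return NULL;
}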