提交 98ecb3cd 编写于 作者: Liu Shixin 提交者: Zheng Zengkai

mm: declare several functions

hulk inclusion
category: feature
bugzilla: 46904, https://gitee.com/openeuler/kernel/issues/I4QSHG
CVE: NA

--------------------------------

There are several functions that will be used in next patches for
dynamic hugetlb feature. Declare them.

No functional changes.
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 481ef9ce
......@@ -471,6 +471,9 @@ static inline struct hstate *hstate_inode(struct inode *i)
{
return HUGETLBFS_SB(i->i_sb)->hstate;
}
bool prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) false
......
......@@ -1239,6 +1239,22 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
/*
 * memcg_has_children - check whether @memcg has any child csses
 * @memcg: the memory cgroup to test
 *
 * Returns %true when the cgroup hierarchy records at least one child
 * css under @memcg, dead or alive.  This deliberately ignores the
 * use_hierarchy setting; callers that care about it must test it
 * themselves.  The css list is walked under RCU protection.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool has_child;

	rcu_read_lock();
	has_child = css_next_child(NULL, &memcg->css) != NULL;
	rcu_read_unlock();

	return has_child;
}
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
......
......@@ -284,6 +284,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
......@@ -291,6 +292,11 @@ extern void __remove_memory(int nid, u64 start, u64 size);
extern int offline_and_remove_memory(int nid, u64 start, u64 size);
#else
/*
 * CONFIG_MEMORY_HOTREMOVE=n stub (this is the #else branch of the
 * CONFIG_MEMORY_HOTREMOVE block above): memory hot-remove support is
 * compiled out, so migrating a pfn range away is not possible.
 */
static inline int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
return -ENOSYS;
}
/* CONFIG_MEMORY_HOTREMOVE=n stub: nothing to offline, silently do nothing. */
static inline void try_offline_node(int nid) {}
static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
......
......@@ -1633,7 +1633,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
spin_unlock_irq(&hugetlb_lock);
}
static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
bool prep_compound_gigantic_page(struct page *page, unsigned int order)
{
int i, j;
int nr_pages = 1 << order;
......
......@@ -197,6 +197,9 @@ extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
extern void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order, bool check_free);
extern int user_min_free_kbytes;
extern void zone_pcp_update(struct zone *zone);
......
......@@ -3402,22 +3402,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
return nr_reclaimed;
}
/*
 * Test whether @memcg has children, dead or alive. Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy. Testing use_hierarchy is the caller's responsibility.
 *
 * The child-list lookup is done under rcu_read_lock(); the pointer
 * returned by css_next_child() is implicitly converted to bool here.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
bool ret;
rcu_read_lock();
ret = css_next_child(NULL, &memcg->css);
rcu_read_unlock();
return ret;
}
/*
* Reclaims as many pages from the given memcg as possible.
*
......
......@@ -1165,8 +1165,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
return 0;
}
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
struct page *page, *head;
......
......@@ -1203,7 +1203,7 @@ static void kernel_init_free_pages(struct page *page, int numpages)
kasan_enable_current();
}
static __always_inline bool free_pages_prepare(struct page *page,
__always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free)
{
int bad = 0;
......@@ -2283,8 +2283,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
set_page_owner(page, order, gfp_flags);
}
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags)
void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags)
{
post_alloc_hook(page, order, gfp_flags);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册