提交 5f53feed 编写于 作者: L Liu Shixin 提交者: Zheng Zengkai

mm/hugetlb: add parameter hugetlbfs_inode_info to several functions

hulk inclusion
category: feature
bugzilla: 46904, https://gitee.com/openeuler/kernel/issues/I4QSHG
CVE: NA

--------------------------------

In next patches, struct hugetlbfs_inode_info will be used to check whether
a hugetlbfs file has memory in hpool, so add parameter hugetlbfs_inode_info
to related functions, including hugetlb_acct_memory/hugepage_subpool_get_pages/
hugepage_subpool_put_pages.

No functional changes.
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 98ecb3cd
...@@ -86,7 +86,7 @@ int sysctl_hugetlb_mig_noalloc; ...@@ -86,7 +86,7 @@ int sysctl_hugetlb_mig_noalloc;
int sysctl_hugetlb_pmem_allocall; int sysctl_hugetlb_pmem_allocall;
/* Forward declaration */ /* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta); static int hugetlb_acct_memory(struct hstate *h, long delta, struct hugetlbfs_inode_info *info);
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
unsigned long irq_flags) unsigned long irq_flags)
...@@ -101,7 +101,7 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, ...@@ -101,7 +101,7 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
if (free) { if (free) {
if (spool->min_hpages != -1) if (spool->min_hpages != -1)
hugetlb_acct_memory(spool->hstate, hugetlb_acct_memory(spool->hstate,
-spool->min_hpages); -spool->min_hpages, NULL);
kfree(spool); kfree(spool);
} }
} }
...@@ -121,7 +121,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, ...@@ -121,7 +121,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
spool->hstate = h; spool->hstate = h;
spool->min_hpages = min_hpages; spool->min_hpages = min_hpages;
if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages, NULL)) {
kfree(spool); kfree(spool);
return NULL; return NULL;
} }
...@@ -149,7 +149,7 @@ void hugepage_put_subpool(struct hugepage_subpool *spool) ...@@ -149,7 +149,7 @@ void hugepage_put_subpool(struct hugepage_subpool *spool)
* a subpool minimum size must be maintained. * a subpool minimum size must be maintained.
*/ */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta) long delta, struct hugetlbfs_inode_info *info)
{ {
long ret = delta; long ret = delta;
...@@ -194,7 +194,7 @@ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, ...@@ -194,7 +194,7 @@ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
* in the case where a subpool minimum size must be maintained. * in the case where a subpool minimum size must be maintained.
*/ */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
long delta) long delta, struct hugetlbfs_inode_info *info)
{ {
long ret = delta; long ret = delta;
unsigned long flags; unsigned long flags;
...@@ -742,11 +742,11 @@ void hugetlb_fix_reserve_counts(struct inode *inode) ...@@ -742,11 +742,11 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
long rsv_adjust; long rsv_adjust;
bool reserved = false; bool reserved = false;
rsv_adjust = hugepage_subpool_get_pages(spool, 1); rsv_adjust = hugepage_subpool_get_pages(spool, 1, HUGETLBFS_I(inode));
if (rsv_adjust > 0) { if (rsv_adjust > 0) {
struct hstate *h = hstate_inode(inode); struct hstate *h = hstate_inode(inode);
if (!hugetlb_acct_memory(h, 1)) if (!hugetlb_acct_memory(h, 1, HUGETLBFS_I(inode)))
reserved = true; reserved = true;
} else if (!rsv_adjust) { } else if (!rsv_adjust) {
reserved = true; reserved = true;
...@@ -1589,7 +1589,7 @@ void free_huge_page(struct page *page) ...@@ -1589,7 +1589,7 @@ void free_huge_page(struct page *page)
* after page is free. Therefore, force restore_reserve * after page is free. Therefore, force restore_reserve
* operation. * operation.
*/ */
if (hugepage_subpool_put_pages(spool, 1) == 0) if (hugepage_subpool_put_pages(spool, 1, NULL) == 0)
restore_reserve = true; restore_reserve = true;
} }
...@@ -2465,6 +2465,7 @@ static void restore_reserve_on_error(struct hstate *h, ...@@ -2465,6 +2465,7 @@ static void restore_reserve_on_error(struct hstate *h,
struct page *alloc_huge_page(struct vm_area_struct *vma, struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve) unsigned long addr, int avoid_reserve)
{ {
struct hugetlbfs_inode_info *info = HUGETLBFS_I(file_inode(vma->vm_file));
struct hugepage_subpool *spool = subpool_vma(vma); struct hugepage_subpool *spool = subpool_vma(vma);
struct hstate *h = hstate_vma(vma); struct hstate *h = hstate_vma(vma);
struct page *page; struct page *page;
...@@ -2492,7 +2493,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, ...@@ -2492,7 +2493,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
* checked against any subpool limit. * checked against any subpool limit.
*/ */
if (map_chg || avoid_reserve) { if (map_chg || avoid_reserve) {
gbl_chg = hugepage_subpool_get_pages(spool, 1); gbl_chg = hugepage_subpool_get_pages(spool, 1, info);
if (gbl_chg < 0) { if (gbl_chg < 0) {
vma_end_reservation(h, vma, addr); vma_end_reservation(h, vma, addr);
return ERR_PTR(-ENOSPC); return ERR_PTR(-ENOSPC);
...@@ -2570,8 +2571,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, ...@@ -2570,8 +2571,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
*/ */
long rsv_adjust; long rsv_adjust;
rsv_adjust = hugepage_subpool_put_pages(spool, 1); rsv_adjust = hugepage_subpool_put_pages(spool, 1, info);
hugetlb_acct_memory(h, -rsv_adjust); hugetlb_acct_memory(h, -rsv_adjust, info);
if (deferred_reserve) if (deferred_reserve)
hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
pages_per_huge_page(h), page); pages_per_huge_page(h), page);
...@@ -2586,7 +2587,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, ...@@ -2586,7 +2587,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
h_cg); h_cg);
out_subpool_put: out_subpool_put:
if (map_chg || avoid_reserve) if (map_chg || avoid_reserve)
hugepage_subpool_put_pages(spool, 1); hugepage_subpool_put_pages(spool, 1, info);
vma_end_reservation(h, vma, addr); vma_end_reservation(h, vma, addr);
return ERR_PTR(-ENOSPC); return ERR_PTR(-ENOSPC);
} }
...@@ -3891,7 +3892,7 @@ unsigned long hugetlb_total_pages(void) ...@@ -3891,7 +3892,7 @@ unsigned long hugetlb_total_pages(void)
return nr_total_pages; return nr_total_pages;
} }
static int hugetlb_acct_memory(struct hstate *h, long delta) static int hugetlb_acct_memory(struct hstate *h, long delta, struct hugetlbfs_inode_info *info)
{ {
int ret = -ENOMEM; int ret = -ENOMEM;
...@@ -3958,6 +3959,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) ...@@ -3958,6 +3959,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
static void hugetlb_vm_op_close(struct vm_area_struct *vma) static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{ {
struct hugetlbfs_inode_info *info = HUGETLBFS_I(file_inode(vma->vm_file));
struct hstate *h = hstate_vma(vma); struct hstate *h = hstate_vma(vma);
struct resv_map *resv = vma_resv_map(vma); struct resv_map *resv = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma); struct hugepage_subpool *spool = subpool_vma(vma);
...@@ -3977,8 +3979,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) ...@@ -3977,8 +3979,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
* Decrement reserve counts. The global reserve count may be * Decrement reserve counts. The global reserve count may be
* adjusted if the subpool has a minimum size. * adjusted if the subpool has a minimum size.
*/ */
gbl_reserve = hugepage_subpool_put_pages(spool, reserve); gbl_reserve = hugepage_subpool_put_pages(spool, reserve, info);
hugetlb_acct_memory(h, -gbl_reserve); hugetlb_acct_memory(h, -gbl_reserve, info);
} }
kref_put(&resv->refs, resv_map_release); kref_put(&resv->refs, resv_map_release);
...@@ -5424,6 +5426,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -5424,6 +5426,7 @@ int hugetlb_reserve_pages(struct inode *inode,
struct resv_map *resv_map; struct resv_map *resv_map;
struct hugetlb_cgroup *h_cg = NULL; struct hugetlb_cgroup *h_cg = NULL;
long gbl_reserve, regions_needed = 0; long gbl_reserve, regions_needed = 0;
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
/* This should never happen */ /* This should never happen */
if (from > to) { if (from > to) {
...@@ -5492,7 +5495,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -5492,7 +5495,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* the subpool has a minimum size, there may be some global * the subpool has a minimum size, there may be some global
* reservations already in place (gbl_reserve). * reservations already in place (gbl_reserve).
*/ */
gbl_reserve = hugepage_subpool_get_pages(spool, chg); gbl_reserve = hugepage_subpool_get_pages(spool, chg, info);
if (gbl_reserve < 0) { if (gbl_reserve < 0) {
ret = -ENOSPC; ret = -ENOSPC;
goto out_uncharge_cgroup; goto out_uncharge_cgroup;
...@@ -5502,7 +5505,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -5502,7 +5505,7 @@ int hugetlb_reserve_pages(struct inode *inode,
* Check enough hugepages are available for the reservation. * Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not * Hand the pages back to the subpool if there are not
*/ */
ret = hugetlb_acct_memory(h, gbl_reserve); ret = hugetlb_acct_memory(h, gbl_reserve, info);
if (ret < 0) { if (ret < 0) {
goto out_put_pages; goto out_put_pages;
} }
...@@ -5522,7 +5525,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -5522,7 +5525,7 @@ int hugetlb_reserve_pages(struct inode *inode,
add = region_add(resv_map, from, to, regions_needed, h, h_cg); add = region_add(resv_map, from, to, regions_needed, h, h_cg);
if (unlikely(add < 0)) { if (unlikely(add < 0)) {
hugetlb_acct_memory(h, -gbl_reserve); hugetlb_acct_memory(h, -gbl_reserve, info);
ret = add; ret = add;
goto out_put_pages; goto out_put_pages;
} else if (unlikely(chg > add)) { } else if (unlikely(chg > add)) {
...@@ -5544,8 +5547,8 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -5544,8 +5547,8 @@ int hugetlb_reserve_pages(struct inode *inode,
(chg - add) * pages_per_huge_page(h), h_cg); (chg - add) * pages_per_huge_page(h), h_cg);
rsv_adjust = hugepage_subpool_put_pages(spool, rsv_adjust = hugepage_subpool_put_pages(spool,
chg - add); chg - add, info);
hugetlb_acct_memory(h, -rsv_adjust); hugetlb_acct_memory(h, -rsv_adjust, info);
} else if (h_cg) { } else if (h_cg) {
/* /*
* The file_regions will hold their own reference to * The file_regions will hold their own reference to
...@@ -5559,7 +5562,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -5559,7 +5562,7 @@ int hugetlb_reserve_pages(struct inode *inode,
return 0; return 0;
out_put_pages: out_put_pages:
/* put back original number of pages, chg */ /* put back original number of pages, chg */
(void)hugepage_subpool_put_pages(spool, chg); (void)hugepage_subpool_put_pages(spool, chg, info);
out_uncharge_cgroup: out_uncharge_cgroup:
hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
chg * pages_per_huge_page(h), h_cg); chg * pages_per_huge_page(h), h_cg);
...@@ -5583,6 +5586,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, ...@@ -5583,6 +5586,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long chg = 0; long chg = 0;
struct hugepage_subpool *spool = subpool_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode);
long gbl_reserve; long gbl_reserve;
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
/* /*
* Since this routine can be called in the evict inode path for all * Since this routine can be called in the evict inode path for all
...@@ -5607,8 +5611,8 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, ...@@ -5607,8 +5611,8 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
* If the subpool has a minimum size, the number of global * If the subpool has a minimum size, the number of global
* reservations to be released may be adjusted. * reservations to be released may be adjusted.
*/ */
gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed), info);
hugetlb_acct_memory(h, -gbl_reserve); hugetlb_acct_memory(h, -gbl_reserve, info);
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册