/* SPDX-License-Identifier: GPL-2.0 */
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
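/*
 * hugetlb_vmemmap_optimize() remaps the vmemmap pages describing a HugeTLB
 * page's tail struct pages onto a single reserved page and frees the rest;
 * hugetlb_vmemmap_restore() repopulates them (and can fail with -ENOMEM)
 * before the HugeTLB page is returned to the buddy allocator. Both are
 * implemented in mm/hugetlb_vmemmap.c.
 */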
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/vm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}
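
/*
 * Worked example, assuming typical x86_64 values (4 KiB base pages and a
 * 64-byte struct page): a 2 MiB HugeTLB page is described by 512 struct
 * pages, so hugetlb_vmemmap_size() is 512 * 64 = 32 KiB, i.e. 8 vmemmap
 * pages.
 */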

/*
 * Return how much vmemmap, in bytes, is associated with a HugeTLB page that
 * can be optimized away and freed back to the buddy allocator. The
 * optimization requires sizeof(struct page) to be a power of 2, so that the
 * struct pages of a HugeTLB page span an integral number of vmemmap pages.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
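
/*
 * Continuing the illustrative x86_64 example above: with a 32 KiB vmemmap and
 * one reserved page, hugetlb_vmemmap_optimizable_size() is 32 KiB - 4 KiB =
 * 28 KiB, i.e. 7 of the 8 vmemmap pages can be freed per 2 MiB HugeTLB page.
 */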
#else
static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	return 0;
}

static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}
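
/*
 * Illustrative pairing of the entry points above (a sketch only; the real
 * call sites live in mm/hugetlb.c and differ across kernel versions):
 *
 *	// once a new HugeTLB page has been prepped:
 *	hugetlb_vmemmap_optimize(h, head);
 *
 *	// before dissolving the page back to the buddy allocator:
 *	if (hugetlb_vmemmap_restore(h, head))
 *		;	// -ENOMEM: vmemmap pages could not be repopulated
 */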
#endif /* _LINUX_HUGETLB_VMEMMAP_H */