Unverified commit 8e2491eb, authored by openeuler-ci-bot, committed by Gitee

!443 OLK-5.10 backport sharepool and config isolation patches

Merge Pull Request from: @zhang-zekun-zk 
 
OLK-5.10 backport sharepool and config isolation patches 
 
Link: https://gitee.com/openeuler/kernel/pulls/443 

Reviewed-by: Weilong Chen <chenweilong@huawei.com> 
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
@@ -2081,7 +2081,7 @@ config ASCEND_CHARGE_MIGRATE_HUGEPAGES
 config ASCEND_SHARE_POOL
 	bool "Enable support for the Share Pool Memory"
 	default n
-	depends on HAVE_ARCH_HUGE_VMALLOC
+	depends on HAVE_ARCH_HUGE_VMALLOC && EXTEND_HUGEPAGE_MAPPING
 	select ARCH_USES_HIGH_VMA_FLAGS
 	help
 	  This feature allows multiple processes to share virtual memory both
......
@@ -721,6 +721,8 @@ CONFIG_ACPI_HMAT=y
 CONFIG_HAVE_ACPI_APEI=y
 CONFIG_ACPI_APEI=y
 CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_GHES_TS_CORE=y
+CONFIG_ACPI_APEI_GHES_NOTIFY_ALL_RAS_ERR=y
 CONFIG_ACPI_APEI_PCIEAER=y
 CONFIG_ACPI_APEI_SEA=y
 CONFIG_ACPI_APEI_MEMORY_FAILURE=y
@@ -1129,6 +1131,7 @@ CONFIG_PIN_MEMORY=y
 CONFIG_PID_RESERVE=y
 CONFIG_MEMORY_RELIABLE=y
 # CONFIG_CLEAR_FREELIST_PAGE is not set
+CONFIG_EXTEND_HUGEPAGE_MAPPING=y
 #
 # Data Access Monitoring
@@ -5927,6 +5930,7 @@ CONFIG_ARM_SMMU=y
 CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
 CONFIG_ARM_SMMU_V3=y
 CONFIG_ARM_SMMU_V3_SVA=y
+CONFIG_ARM_SMMU_V3_PM=y
 # CONFIG_AGENT_SMMU_ATOS is not set
 # CONFIG_QCOM_IOMMU is not set
 # CONFIG_VIRTIO_IOMMU is not set
@@ -6110,6 +6114,8 @@ CONFIG_THUNDERX2_PMU=m
 CONFIG_XGENE_PMU=y
 CONFIG_ARM_SPE_PMU=y
 CONFIG_HISI_PMU=m
+CONFIG_HISI_L3T_PMU=m
+CONFIG_HISI_LPDDRC_PMU=m
 CONFIG_HISI_PCIE_PMU=m
 CONFIG_HNS3_PMU=m
 # end of Performance monitor support
......
@@ -33,6 +33,20 @@ config ACPI_APEI_GHES
 	  by firmware to produce more valuable hardware error
 	  information for Linux.
 
+config ACPI_APEI_GHES_TS_CORE
+	bool "Support TS core RAS processing for Ascend"
+	depends on ARM64 && ACPI_APEI_GHES
+	default n
+	help
+	  Enable TS core RAS error processing on Ascend platforms.
+
+config ACPI_APEI_GHES_NOTIFY_ALL_RAS_ERR
+	bool "Notify all RAS errors to the driver"
+	depends on ARM64 && ACPI_APEI_GHES
+	default n
+	help
+	  Deliver all types of RAS errors to the driver.
+
 config ACPI_APEI_PCIEAER
 	bool "APEI PCIe AER logging/recovering support"
 	depends on ACPI_APEI && PCIEAER
......
@@ -118,8 +118,10 @@ module_param_named(disable, ghes_disable, bool, 0);
 static LIST_HEAD(ghes_hed);
 static DEFINE_MUTEX(ghes_list_mutex);
 
+#ifdef CONFIG_ACPI_APEI_GHES_TS_CORE
 BLOCKING_NOTIFIER_HEAD(ghes_ts_err_chain);
 EXPORT_SYMBOL(ghes_ts_err_chain);
+#endif
 
 /*
  * Because the memory area used to transfer hardware error information
@@ -657,20 +659,26 @@ static bool ghes_do_proc(struct ghes *ghes,
 		} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
 			queued = ghes_handle_arm_hw_error(gdata, sev);
+#ifdef CONFIG_ACPI_APEI_GHES_TS_CORE
 		} else if (guid_equal(sec_type, &CPER_SEC_TS_CORE)) {
 			blocking_notifier_call_chain(&ghes_ts_err_chain,
 					0, acpi_hest_get_payload(gdata));
+#endif
 		} else {
 			void *err = acpi_hest_get_payload(gdata);
 
+#ifndef CONFIG_ACPI_APEI_GHES_NOTIFY_ALL_RAS_ERR
+			ghes_defer_non_standard_event(gdata, sev);
+#endif
 			log_non_standard_event(sec_type, fru_id, fru_text,
 					       sec_sev, err,
 					       gdata->error_data_length);
 		}
+#ifdef CONFIG_ACPI_APEI_GHES_NOTIFY_ALL_RAS_ERR
 		/* Customization deliver all types error to driver. */
 		ghes_defer_non_standard_event(gdata, sev);
+#endif
 	}
 
 	return queued;
......
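For context, a minimal consumer sketch (not part of this PR) of the ghes_ts_err_chain exported above; the callback and init function names are hypothetical, but blocking_notifier_chain_register() is the standard kernel API for this chain type:

/*
 * Hypothetical sketch: how an Ascend TS-core driver could subscribe to
 * the chain above when CONFIG_ACPI_APEI_GHES_TS_CORE is set.
 */
#include <linux/notifier.h>
#include <acpi/ghes.h>

static int ts_core_ras_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* 'data' is the raw CPER payload from acpi_hest_get_payload() */
	return NOTIFY_OK;
}

static struct notifier_block ts_core_ras_nb = {
	.notifier_call = ts_core_ras_event,
};

static int __init ts_core_ras_init(void)
{
	return blocking_notifier_chain_register(&ghes_ts_err_chain,
						&ts_core_ras_nb);
}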
@@ -336,6 +336,13 @@ config AGENT_SMMU_ATOS
 	  Say Y here if your system will be used in Ascend Advanced Accelerator
 	  with HCCS bus. Or want use the ATOS of SMMU.
 
+config ARM_SMMU_V3_PM
+	bool "Add arm_smmu_v3 suspend and resume support"
+	depends on ARM_SMMU_V3 && PM_SLEEP
+	default n
+	help
+	  Add suspend and resume support for the ARM SMMUv3 driver.
+
 config S390_IOMMU
 	def_bool y if S390 && PCI
 	depends on S390 && PCI
......
@@ -4349,7 +4349,7 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
 	doorbell &= MSI_CFG0_ADDR_MASK;
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_SMMU_V3_PM
 	/* Saves the msg (base addr of msi irq) and restores it during resume */
 	desc->msg.address_lo = msg->address_lo;
 	desc->msg.address_hi = msg->address_hi;
@@ -4411,7 +4411,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
 	devm_add_action(dev, arm_smmu_free_msis, dev);
 }
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_SMMU_V3_PM
 static void arm_smmu_resume_msis(struct arm_smmu_device *smmu)
 {
 	struct msi_desc *desc;
@@ -5313,8 +5313,7 @@ static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
 	return devm_ioremap_resource(dev, &res);
 }
 
-#ifdef CONFIG_PM_SLEEP
-
+#ifdef CONFIG_ARM_SMMU_V3_PM
 static int arm_smmu_ecmdq_disable(struct device *dev)
 {
 	int i, j;
@@ -5521,7 +5520,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_SMMU_V3_PM
 static const struct dev_pm_ops arm_smmu_pm_ops = {
 	.suspend = arm_smmu_suspend,
 	.resume = arm_smmu_resume,
......
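For reference, a sketch (an assumption, not shown in this diff) of how the pm ops above end up attached to the platform driver under the new option, mirroring the CONFIG_PM_SLEEP-to-CONFIG_ARM_SMMU_V3_PM switch in the hunks above:

/*
 * Sketch (assumption): the pm ops are only referenced when
 * CONFIG_ARM_SMMU_V3_PM is enabled; other fields follow the
 * upstream arm-smmu-v3 driver.
 */
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= arm_smmu_of_match,
#ifdef CONFIG_ARM_SMMU_V3_PM
		.pm		= &arm_smmu_pm_ops,
#endif
	},
	.probe	= arm_smmu_device_probe,
};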
@@ -24,3 +24,22 @@ config HNS3_PMU
 	  devices.
 	  Adds the HNS3 PMU into perf events system for monitoring latency,
 	  bandwidth etc.
+
+config HISI_L3T_PMU
+	tristate "HiSilicon SoC L3T PMU drivers"
+	depends on HISI_PMU
+	default n
+	help
+	  Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home
+	  Agent performance monitor and DDR Controller performance monitor.
+	  L3T is a specialized PMU driver.
+
+config HISI_LPDDRC_PMU
+	tristate "HiSilicon SoC LPDDRC PMU drivers"
+	depends on HISI_PMU
+	default n
+	help
+	  Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home
+	  Agent performance monitor and DDR Controller performance monitor.
+	  LPDDRC is a specialized PMU driver.
......
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
 			  hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
-			  hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o \
-			  hisi_uncore_l3t_pmu.o \
-			  hisi_uncore_lpddrc_pmu.o
+			  hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
 obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
 obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
+obj-$(CONFIG_HISI_L3T_PMU) += hisi_uncore_l3t_pmu.o
+obj-$(CONFIG_HISI_LPDDRC_PMU) += hisi_uncore_lpddrc_pmu.o
@@ -602,13 +602,20 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 		 * if the page is from buddy system, do not add to freed.
 		 * because freed is used for hugetlbfs reservation accounting.
 		 */
-		if (!HPageTemporary(page)) {
-			freed++;
-			if (!truncate_op) {
-				if (unlikely(hugetlb_unreserve_pages(inode,
-							index, index + 1, 1)))
-					hugetlb_fix_reserve_counts(inode);
-			}
+#ifdef CONFIG_ASCEND_SHARE_POOL
+		if (HPageTemporary(page) != 0) {
+			unlock_page(page);
+			if (!truncate_op)
+				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			continue;
+		}
+#endif
+		freed++;
+		if (!truncate_op) {
+			if (unlikely(hugetlb_unreserve_pages(inode,
+						index, index + 1, 1)))
+				hugetlb_fix_reserve_counts(inode);
 		}
 
 		unlock_page(page);
@@ -1061,8 +1068,12 @@ static int hugetlbfs_error_remove_page(struct address_space *mapping,
 	pgoff_t index = page->index;
 
 	remove_huge_page(page);
+#ifdef CONFIG_ASCEND_SHARE_POOL
 	if (!HPageTemporary(page) &&
 	    unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
+#else
+	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
+#endif
 		hugetlb_fix_reserve_counts(inode);
 
 	return 0;
......
@@ -145,6 +145,8 @@ int ghes_notify_sea(void);
 static inline int ghes_notify_sea(void) { return -ENOENT; }
 #endif
 
+#ifdef CONFIG_ACPI_APEI_GHES_TS_CORE
 extern struct blocking_notifier_head ghes_ts_err_chain;
+#endif
 
 #endif /* GHES_H */
@@ -197,10 +197,13 @@ enum {
 #define CPER_SEC_DMAR_IOMMU						\
 	GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F,	\
 		  0xDF, 0xAA, 0x84, 0xEC)
+#ifdef CONFIG_ACPI_APEI_GHES_TS_CORE
 /* HISI ts core */
 #define CPER_SEC_TS_CORE						\
 	GUID_INIT(0xeb4c71f8, 0xbc76, 0x4c46, 0xbd, 0x9, 0xd0, 0xd3,	\
 		  0x45, 0x0, 0x5a, 0x92)
+#endif
 
 #define CPER_PROC_VALID_TYPE			0x0001
 #define CPER_PROC_VALID_ISA			0x0002
......
@@ -235,11 +235,13 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /* to align the pointer to the (next) PMD hugepage boundary */
 #define PMD_ALIGN(addr)		ALIGN(addr, PMD_SIZE)
 
 /* test whether an address (unsigned long or pointer) is aligned to PMD_SIZE */
 #define PMD_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PMD_SIZE)
+#endif
 
 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
......
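A quick worked example of the new alignment macros, assuming 4K base pages so that PMD_SIZE is 2 MiB (0x200000):

/* PMD_ALIGN rounds up to the next PMD boundary; PMD_ALIGNED tests it:
 *   PMD_ALIGN(0x200001)   == 0x400000   (rounds up)
 *   PMD_ALIGN(0x400000)   == 0x400000   (already aligned, unchanged)
 *   PMD_ALIGNED(0x400000) is true; PMD_ALIGNED(0x400010) is false.
 */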
@@ -27,7 +27,9 @@ struct notifier_block;		/* in notifier.h */
 #define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
 #define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
 #define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 #define VM_HUGE_PAGES		0x00001000	/* used for vmalloc hugepages */
+#endif
 #ifdef CONFIG_ASCEND_SHARE_POOL
 #define VM_SHAREPOOL		0x00002000	/* remapped to sharepool */
 #else
@@ -142,8 +144,11 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller);
 void *vmalloc_no_huge(unsigned long size);
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 extern void *vmalloc_hugepage(unsigned long size);
 extern void *vmalloc_hugepage_user(unsigned long size);
+#endif
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
@@ -160,6 +165,7 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 extern void *vmap_hugepage(struct page **pages, unsigned int count,
 			   unsigned long flags, pgprot_t prot);
 extern int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma,
@@ -167,6 +173,7 @@ extern int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma,
 						unsigned long pgoff, unsigned long size);
 extern int remap_vmalloc_hugepage_range(struct vm_area_struct *vma,
 							void *addr, unsigned long pgoff);
+#endif
 
 /*
  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
......
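A minimal driver-side sketch of the API declared above; the hp_dev_* names, HP_BUF_SIZE, and the device itself are hypothetical, and error handling is trimmed. vmalloc_hugepage_user() yields a user-mappable, PMD-backed area that remap_vmalloc_hugepage_range() can then map into a VMA from an mmap handler:

/*
 * Hypothetical usage sketch for the CONFIG_EXTEND_HUGEPAGE_MAPPING API:
 * back a character device's mmap() with a hugepage-mapped vmalloc buffer.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define HP_BUF_SIZE	(4 * PMD_SIZE)	/* assumption: 8 MiB with 4K pages */

static void *hp_buf;

static int hp_dev_open(struct inode *inode, struct file *file)
{
	hp_buf = vmalloc_hugepage_user(HP_BUF_SIZE); /* user-mappable area */
	return hp_buf ? 0 : -ENOMEM;
}

static int hp_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* remap the hugepage-backed area into the calling process */
	return remap_vmalloc_hugepage_range(vma, hp_buf, vma->vm_pgoff);
}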
@@ -992,6 +992,12 @@ config CLEAR_FREELIST_PAGE
 	  To enable this feature, kernel parameter "clear_freelist" also
 	  needs to be added.
 
+config EXTEND_HUGEPAGE_MAPPING
+	bool "Extend for hugepages mapping"
+	depends on ARM64
+	default n
+	help
+	  Introduce vmalloc/vmap/remap interfaces that handle only hugepages.
+
 source "mm/damon/Kconfig"
......
@@ -46,6 +46,7 @@
 #include <linux/dynamic_hugetlb.h>
 #include "internal.h"
 #include "hugetlb_vmemmap.h"
+#include "share_pool_internal.h"
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
@@ -1625,6 +1626,7 @@ void free_huge_page(struct page *page)
 		h->resv_huge_pages++;
 
 	if (HPageTemporary(page)) {
+		sp_memcg_uncharge_hpage(page);
 		remove_hugetlb_page(h, page, false);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 		update_and_free_page(h, page, true);
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SHARE_POOL_INTERNAL_H
#define SHARE_POOL_INTERNAL_H

#include <linux/hugetlb.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_ASCEND_SHARE_POOL
static inline void sp_memcg_uncharge_hpage(struct page *page)
{
	if (!sp_is_enabled())
		return;

	mem_cgroup_uncharge(page);
}
#else
static inline void sp_memcg_uncharge_hpage(struct page *page)
{
}
#endif

#endif /* SHARE_POOL_INTERNAL_H */
@@ -578,6 +578,7 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 static int vmap_hugepages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
@@ -609,6 +610,7 @@ static int vmap_hugepages_range(unsigned long addr, unsigned long end,
 	return err;
 }
+#endif
 
 /**
  * map_kernel_range_noflush - map kernel VM area with the specified pages
@@ -2792,6 +2794,7 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * vmap_hugepage - map an array of huge pages into virtually contiguous space
  * @pages: array of huge page pointers (only the header)
@@ -2830,6 +2833,7 @@ void *vmap_hugepage(struct page **pages, unsigned int count,
 	return area->addr;
 }
 EXPORT_SYMBOL(vmap_hugepage);
+#endif
 
 #ifdef CONFIG_VMAP_PFN
 struct vmap_pfn_data {
@@ -3015,7 +3019,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	size_per_node = size;
 	if (node == NUMA_NO_NODE)
 		size_per_node /= num_online_nodes();
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 	if (size_per_node >= PMD_SIZE || vm_flags & VM_HUGE_PAGES) {
+#else
+	if (size_per_node >= PMD_SIZE) {
+#endif
 		shift = PMD_SHIFT;
 		align = max(real_align, 1UL << shift);
 		size = ALIGN(real_size, 1UL << shift);
@@ -3050,8 +3058,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	return addr;
 
 fail:
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 	/* User could specify VM_HUGE_PAGES to alloc only hugepages. */
 	if (shift > PAGE_SHIFT && !(vm_flags & VM_HUGE_PAGES)) {
+#else
+	if (shift > PAGE_SHIFT) {
+#endif
 		shift = PAGE_SHIFT;
 		align = real_align;
 		size = real_size;
@@ -3261,6 +3273,7 @@ void *vmalloc_32_user(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * vmalloc_hugepage - allocate virtually contiguous hugetlb memory
  * @size: allocation size
@@ -3298,6 +3311,7 @@
 			__builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_hugepage_user);
+#endif
 
 /*
  * small helper routine , copy contents to buf from addr.
@@ -3620,6 +3634,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * remap_vmalloc_hugepage_range_partial - map vmalloc hugepages
  *   to userspace
@@ -3706,6 +3721,7 @@ int remap_vmalloc_hugepage_range(struct vm_area_struct *vma, void *addr,
 					vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_hugepage_range);
+#endif
 
 void free_vm_area(struct vm_struct *area)
 {
......
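A sketch (an assumption, not taken from this diff) of what vmalloc_hugepage() above plausibly reduces to: passing VM_HUGE_PAGES forces the PMD_SHIFT path in __vmalloc_node_range() and, per the fail: hunk above, suppresses the fallback to PAGE_SHIFT, so the allocation either succeeds with hugepages or fails outright. The function name is hypothetical; the __vmalloc_node_range() signature matches the declaration in the vmalloc.h hunk:

/* Sketch (assumption): a hugepage-only vmalloc built on the new flag. */
void *vmalloc_hugepage_sketch(unsigned long size)
{
	return __vmalloc_node_range(size, PMD_SIZE, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL, VM_HUGE_PAGES,
				    NUMA_NO_NODE, __builtin_return_address(0));
}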