diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 505bdd75b5411258be7d70951c25371abb4e1ab5..644cfa3284a7bbc532394eada9768c8837b89b99 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -346,6 +346,15 @@ void dump_mem_limit(void);
 # define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
 #endif
 
+/*
+ * Memory regions marked with the MEMBLOCK_NOMAP flag (for example, memory
+ * of the EFI_UNUSABLE_MEMORY type) may split a contiguous memory block
+ * into multiple parts. As a result, the number of memory regions can be large.
+ */
+#ifdef CONFIG_EFI
+#define INIT_MEMBLOCK_MEMORY_REGIONS (INIT_MEMBLOCK_REGIONS * 8)
+#endif
+
 #include <asm-generic/memory_model.h>
 
 #endif /* __ASM_MEMORY_H */
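
Reviewer sketch (not part of the patch): the 8x headroom exists because every NOMAP slice punched into the middle of a mapped region splits one memblock entry into three, and this happens before memblock_allow_resize() permits the arrays to grow. A minimal early-boot illustration using the real memblock API — the function name and addresses are made up:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    /* Hypothetical early-boot illustration; addresses are invented. */
    static void __init nomap_split_demo(void)
    {
            /* One contiguous bank: memblock.memory holds a single region. */
            memblock_add(0x80000000, SZ_1G);

            /*
             * Marking a 2 MiB slice in the middle as NOMAP (as the EFI code
             * does for EFI_UNUSABLE_MEMORY) isolates the range, turning the
             * single region into three: plain / NOMAP / plain. Enough such
             * slices can overflow the default 128-entry static array before
             * resizing is available, hence the larger
             * INIT_MEMBLOCK_MEMORY_REGIONS under CONFIG_EFI.
             */
            memblock_mark_nomap(0x80200000, SZ_2M);
    }
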
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ba26ef1739a4c80c812f730c1626918cde7c63cb..a4f258c83a1fe55e8d552461e25b684c432393dc 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -68,6 +68,7 @@ hisilicon_1980005_match(const struct arm64_cpu_capabilities *entry,
 static const struct midr_range idc_support_list[] = {
         MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
         MIDR_REV(MIDR_HISI_TSV200, 1, 0),
+        MIDR_REV(MIDR_HISI_TSV200, 1, 2),
         { /* sentinel */ }
 };
 
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 01542e9f6b050ccb3a26ef372a56a26639420d20..9ac1601c82ea594f9306210d04aa0beda9afd820 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3059,6 +3059,9 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
         struct arm_smmu_device *smmu;
         struct arm_smmu_master *master;
         struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+#ifdef CONFIG_ASCEND_FEATURES
+        u32 sid;
+#endif
 
         if (!fwspec || fwspec->ops != &arm_smmu_ops)
                 return ERR_PTR(-ENODEV);
@@ -3105,6 +3108,15 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
             smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
                 master->stall_enabled = true;
 
+#ifdef CONFIG_ASCEND_FEATURES
+        if (!acpi_dev_prop_read_single(ACPI_COMPANION(dev),
+                                       "streamid", DEV_PROP_U32, &sid)) {
+                if (iommu_fwspec_add_ids(dev, &sid, 1))
+                        dev_info(dev, "failed to add ids\n");
+                master->stall_enabled = true;
+                master->ssid_bits = 0x10;
+        }
+#endif
         arm_smmu_init_pri(master);
 
         return &smmu->iommu;
diff --git a/mm/cma.c b/mm/cma.c
index 9361ecaf52bed3c192c73ed59ea8489e2f62b0dd..09f3b1e264c0cf1308955da0dcf9bdb6543742b9 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -486,8 +486,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
         }
 
         if (ret && !no_warn) {
-                pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
-                       __func__, count, ret);
+                pr_err_ratelimited("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+                                   __func__, count, ret);
                 cma_debug_show_areas(cma);
         }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8f680994df4b10bc46e97297f96559700fa46c0d..9bfb781fafd3f389cd5d8d418d12e5dfea1f5cb0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6286,39 +6286,43 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag)
 }
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
 
+static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
+                                     unsigned long size)
+{
+        pgd_t *pgdp;
+        p4d_t *p4dp;
+        pud_t *pudp;
+        pte_t *ptep = NULL;
+
+        pgdp = pgd_offset(mm, addr);
+        p4dp = p4d_offset(pgdp, addr);
+        pudp = pud_alloc(mm, p4dp, addr);
+        if (!pudp)
+                return NULL;
+
+        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
+
+        return ptep;
+}
+
 static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
-                        pgprot_t prot, unsigned long pfn, bool special)
+                        pgprot_t prot, unsigned long pfn)
 {
         int ret = 0;
         pte_t *ptep, entry;
         struct hstate *h;
-        struct vm_area_struct *vma;
-        struct address_space *mapping;
         spinlock_t *ptl;
 
         h = size_to_hstate(PMD_SIZE);
         if (!h)
                 return -EINVAL;
 
-        if (!IS_ALIGNED(addr, PMD_SIZE))
-                return -EINVAL;
-
-        vma = find_vma(mm, addr);
-        if (!vma || !range_in_vma(vma, addr, addr + PMD_SIZE))
-                return -EINVAL;
-
-        mapping = vma->vm_file->f_mapping;
-        i_mmap_lock_read(mapping);
-        ptep = huge_pte_alloc(mm, addr, huge_page_size(h));
-        if (!ptep) {
-                ret = -ENXIO;
-                goto out_unlock;
-        }
+        ptep = hugetlb_huge_pte_alloc(mm, addr, huge_page_size(h));
+        if (!ptep)
+                return -ENXIO;
 
-        if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep))) {
-                ret = -ENXIO;
-                goto out_unlock;
-        }
+        if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep)))
+                return -ENXIO;
 
         entry = pfn_pte(pfn, prot);
         entry = huge_pte_mkdirty(entry);
@@ -6326,31 +6330,27 @@ static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
         entry = huge_pte_mkwrite(entry);
         entry = pte_mkyoung(entry);
         entry = pte_mkhuge(entry);
-        if (special)
-                entry = pte_mkspecial(entry);
+        entry = pte_mkspecial(entry);
 
         ptl = huge_pte_lockptr(h, mm, ptep);
         spin_lock(ptl);
         set_huge_pte_at(mm, addr, ptep, entry);
         spin_unlock(ptl);
 
-out_unlock:
-        i_mmap_unlock_read(mapping);
-
         return ret;
 }
 
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
                                 pgprot_t prot, struct page *hpage)
 {
-        return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage), false);
+        return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage));
 }
 EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte);
 
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
                                 unsigned long addr,
                                 pgprot_t prot, unsigned long phy_addr)
 {
-        return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT, true);
+        return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte_by_pa);
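
Reviewer sketch (not part of the patch): with the VMA lookup, PMD alignment check, and i_mmap_lock_read() dropped from __hugetlb_insert_hugepage(), those guarantees now fall to the callers; note also the behavioural change that the page-backed variant installs special PTEs unconditionally. A hypothetical caller under those assumptions — map_phys_chunk(), the prot choice, and the loop are illustrative only, and the caller is presumed to hold mmap_lock for @mm:

    /*
     * Hypothetical caller sketch, not from this patch: map a physically
     * contiguous buffer at PMD granularity via the exported helper.
     */
    static int map_phys_chunk(struct mm_struct *mm, unsigned long va,
                              unsigned long pa, unsigned long len)
    {
            pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED);
            unsigned long off;
            int ret;

            /* The alignment check removed above now falls to the caller. */
            if (!IS_ALIGNED(va | pa | len, PMD_SIZE))
                    return -EINVAL;

            for (off = 0; off < len; off += PMD_SIZE) {
                    ret = hugetlb_insert_hugepage_pte_by_pa(mm, va + off,
                                                            prot, pa + off);
                    if (ret)
                            return ret;
            }
            return 0;
    }
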
diff --git a/mm/memblock.c b/mm/memblock.c
index e1fd07166a35a13497b8348a6b2278c382de1d49..b7744ae2ce3dc2012dd7eedd25364357fbbc0c2d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -29,6 +29,10 @@
 # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
 #endif
 
+#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
+#define INIT_MEMBLOCK_MEMORY_REGIONS INIT_MEMBLOCK_REGIONS
+#endif
+
 /**
  * DOC: memblock overview
  *
@@ -55,9 +59,9 @@
  * the allocator metadata. The "memory" and "reserved" types are nicely
  * wrapped with struct memblock. This structure is statically
  * initialized at build time. The region arrays are initially sized to
- * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
- * for "reserved". The region array for "physmem" is initially sized to
- * %INIT_PHYSMEM_REGIONS.
+ * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
+ * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
+ * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
  * The memblock_allow_resize() enables automatic resizing of the region
  * arrays during addition of new regions. This feature should be used
  * with care so that memory allocated for the region array will not
@@ -102,7 +106,7 @@ unsigned long min_low_pfn;
 unsigned long max_pfn;
 unsigned long long max_possible_pfn;
 
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
@@ -111,7 +115,7 @@ static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
 struct memblock memblock __initdata_memblock = {
         .memory.regions         = memblock_memory_init_regions,
         .memory.cnt             = 1,    /* empty dummy entry */
-        .memory.max             = INIT_MEMBLOCK_REGIONS,
+        .memory.max             = INIT_MEMBLOCK_MEMORY_REGIONS,
         .memory.name            = "memory",
 
         .reserved.regions       = memblock_reserved_init_regions,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a3617f0a0fd1f4c3a6ead803317e1415712fcc75..b2c4bc4bb5912c42bb7b6d1a1b965f09c96642de 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3116,12 +3116,23 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
 {
         struct obj_cgroup *objcg;
         unsigned int nr_pages = 1 << order;
+#ifdef CONFIG_ASCEND_FEATURES
+        struct mem_cgroup *memcg;
+#endif
 
         if (!PageMemcgKmem(page))
                 return;
 
         objcg = __page_objcg(page);
         obj_cgroup_uncharge_pages(objcg, nr_pages);
+
+#ifdef CONFIG_ASCEND_FEATURES
+        memcg = get_mem_cgroup_from_objcg(objcg);
+        if (!mem_cgroup_is_root(memcg))
+                memcg_oom_recover(memcg);
+        css_put(&memcg->css);
+#endif
+
         page->memcg_data = 0;
         obj_cgroup_put(objcg);
 }
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 417ff9574d1953cb17a384617ca5a46774188e98..0f77eb4c6644ee60df5ae8d275d48bde1c511567 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -1091,6 +1091,7 @@ static void check_panic_on_oom(struct oom_control *oc)
         if (is_sysrq_oom(oc))
                 return;
         dump_header(oc, NULL);
+        oom_type_notifier_call(0, oc);
         panic("Out of memory: %s panic_on_oom is enabled\n",
                 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 }
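
Reviewer sketch (not part of the patch): in the memcontrol.c hunk, waking the memcg OOM waitqueue after a kmem uncharge lets tasks blocked in the memcg OOM path retry their charge against the pages just freed. For orientation, a paraphrase of the helper being called — an existing static helper in mm/memcontrol.c of this kernel generation, reproduced here from memory as an assumption, not added by the patch:

    /*
     * Paraphrase of the existing mm/memcontrol.c helper invoked above:
     * it wakes tasks sleeping in mem_cgroup_oom_synchronize().
     */
    static void memcg_oom_recover(struct mem_cgroup *memcg)
    {
            /*
             * under_oom is only set while the hierarchy is in OOM, so the
             * wait queue can only have sleepers during that window.
             */
            if (memcg && memcg->under_oom)
                    __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
    }

In the oom_kill.c hunk, the call appears intended to let registered listeners act before the panic_on_oom panic fires; the literal 0 presumably selects a default notifier type, and a named constant would read more clearly.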