Unverified · Commit d4a78fdf · Authored by openeuler-ci-bot · Committed via Gitee

!294 Ascend patch backports

Merge Pull Request from: @zhangjian210 
 
This pull request contains 7 bug fixes. The following 5 address problems found in Ascend use cases:

1. When CONFIG_ASCEND_FEATURES is enabled we use tmp hugepages. If a process triggers an OOM while the
OOM killer is disabled, the process never runs normally again even after the system has enough free memory.
One patch fixes this.
2. Some TaiShan cores (Taishan110) have sub-version 2; this must be recognized when enabling the TaiShan IDC feature.
3. When CONFIG_ASCEND_FEATURES is enabled, if a process triggers an OOM while panic_on_oom is enabled, a
notifier call chain must be used to tell other drivers about the problem (a subscriber sketch follows this list).
4. With the Ascend features, some drivers' VMAs contain both huge pages and normal pages. In that case the
VMA's information cannot be used to allocate the page, so this interface now allocates the huge page mapping
directly instead of relying on the VMA's information.
5. When CONFIG_ASCEND_FEATURES and ACPI are enabled, enabling the SMMU through ACPI requires reading the
device's stream ID (sid) from the ACPI tables.
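
For fix 3, the diff below only adds the call site, oom_type_notifier_call(0, oc), in check_panic_on_oom(); the subscriber side is not part of this pull request. A minimal sketch of what a driver-side subscriber could look like, assuming a standard notifier_block and a hypothetical register_oom_type_notifier() helper exported alongside oom_type_notifier_call():

/*
 * Sketch of a driver subscribing to the OOM type notification chain.
 * register_oom_type_notifier() is an assumed helper name; only the
 * call site oom_type_notifier_call() appears in this pull request.
 */
#include <linux/notifier.h>
#include <linux/printk.h>

static int my_oom_type_event(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	/* "data" is the struct oom_control passed by the caller. */
	pr_warn("OOM notification received, type=%lu\n", type);
	return NOTIFY_OK;
}

static struct notifier_block my_oom_type_nb = {
	.notifier_call = my_oom_type_event,
};

/* Registered from the driver's init path (assumed API): */
/* register_oom_type_notifier(&my_oom_type_nb); */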

2 patches are backported from mainline:
1. When no EXTRA CMA memory is reserved, the log buffer can easily be filled up with CMA allocation-failure
warnings. This patch switches to pr_err_ratelimited() to reduce the duplicate CMA warnings.
2. The ECC feature adds a maximum of 64 reserved physical memory segments, which increases the number of memblock regions (see the sizing sketch below).
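
As a rough illustration of backport 2 on arm64 with CONFIG_EFI (the diffs to memory.h and mm/memblock.c are shown further down), the static "memory" region array grows as follows; the base value of 128 for INIT_MEMBLOCK_REGIONS is the upstream default and is assumed here, it is not changed by this series:

/* Assumed upstream default, not changed by this series: */
#define INIT_MEMBLOCK_REGIONS		128

/* Added by this series for arm64 when CONFIG_EFI is set: */
#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)	/* 1024 entries */

/*
 * mm/memblock.c then sizes the static "memory" array with the larger value:
 * static struct memblock_region
 *	memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
 */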

 
 
Link: https://gitee.com/openeuler/kernel/pulls/294
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -346,6 +346,15 @@ void dump_mem_limit(void);
 # define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
 #endif
 
+/*
+ * memory regions which marked with flag MEMBLOCK_NOMAP(for example, the memory
+ * of the EFI_UNUSABLE_MEMORY type) may divide a continuous memory block into
+ * multiple parts. As a result, the number of memory regions is large.
+ */
+#ifdef CONFIG_EFI
+#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)
+#endif
+
 #include <asm-generic/memory_model.h>
 
 #endif /* __ASM_MEMORY_H */
@@ -68,6 +68,7 @@ hisilicon_1980005_match(const struct arm64_cpu_capabilities *entry,
 static const struct midr_range idc_support_list[] = {
 	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
 	MIDR_REV(MIDR_HISI_TSV200, 1, 0),
+	MIDR_REV(MIDR_HISI_TSV200, 1, 2),
 	{ /* sentinel */ }
 };
......
@@ -3059,6 +3059,9 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_master *master;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+#ifdef CONFIG_ASCEND_FEATURES
+	u32 sid;
+#endif
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return ERR_PTR(-ENODEV);
@@ -3105,6 +3108,15 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 	    smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
 		master->stall_enabled = true;
 
+#ifdef CONFIG_ASCEND_FEATURES
+	if (!acpi_dev_prop_read_single(ACPI_COMPANION(dev),
+				       "streamid", DEV_PROP_U32, &sid)) {
+		if (iommu_fwspec_add_ids(dev, &sid, 1))
+			dev_info(dev, "failed to add ids\n");
+		master->stall_enabled = true;
+		master->ssid_bits = 0x10;
+	}
+#endif
 	arm_smmu_init_pri(master);
 
 	return &smmu->iommu;
......
@@ -486,8 +486,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	}
 
 	if (ret && !no_warn) {
-		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
-			__func__, count, ret);
+		pr_err_ratelimited("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+				   __func__, count, ret);
 		cma_debug_show_areas(cma);
 	}
......
@@ -6286,39 +6286,43 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag)
 }
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
 
+static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
+				     unsigned long size)
+{
+	pgd_t *pgdp;
+	p4d_t *p4dp;
+	pud_t *pudp;
+	pte_t *ptep = NULL;
+
+	pgdp = pgd_offset(mm, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	pudp = pud_alloc(mm, p4dp, addr);
+	if (!pudp)
+		return NULL;
+
+	ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
+
+	return ptep;
+}
+
 static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
-				     pgprot_t prot, unsigned long pfn, bool special)
+				     pgprot_t prot, unsigned long pfn)
 {
 	int ret = 0;
 	pte_t *ptep, entry;
 	struct hstate *h;
-	struct vm_area_struct *vma;
-	struct address_space *mapping;
 	spinlock_t *ptl;
 
 	h = size_to_hstate(PMD_SIZE);
 	if (!h)
 		return -EINVAL;
 
-	if (!IS_ALIGNED(addr, PMD_SIZE))
-		return -EINVAL;
-
-	vma = find_vma(mm, addr);
-	if (!vma || !range_in_vma(vma, addr, addr + PMD_SIZE))
-		return -EINVAL;
-
-	mapping = vma->vm_file->f_mapping;
-	i_mmap_lock_read(mapping);
-	ptep = huge_pte_alloc(mm, addr, huge_page_size(h));
-	if (!ptep) {
-		ret = -ENXIO;
-		goto out_unlock;
-	}
+	ptep = hugetlb_huge_pte_alloc(mm, addr, huge_page_size(h));
+	if (!ptep)
+		return -ENXIO;
 
-	if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep))) {
-		ret = -ENXIO;
-		goto out_unlock;
-	}
+	if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep)))
+		return -ENXIO;
 
 	entry = pfn_pte(pfn, prot);
 	entry = huge_pte_mkdirty(entry);
@@ -6326,31 +6330,27 @@ static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
 	entry = huge_pte_mkwrite(entry);
 	entry = pte_mkyoung(entry);
 	entry = pte_mkhuge(entry);
-	if (special)
-		entry = pte_mkspecial(entry);
+	entry = pte_mkspecial(entry);
 
 	ptl = huge_pte_lockptr(h, mm, ptep);
 	spin_lock(ptl);
 	set_huge_pte_at(mm, addr, ptep, entry);
 	spin_unlock(ptl);
 
-out_unlock:
-	i_mmap_unlock_read(mapping);
-
 	return ret;
 }
 
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 				pgprot_t prot, struct page *hpage)
 {
-	return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage), false);
+	return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage));
 }
 EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte);
 
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm, unsigned long addr,
 				      pgprot_t prot, unsigned long phy_addr)
 {
-	return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT, true);
+	return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte_by_pa);
......
@@ -29,6 +29,10 @@
 # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
 #endif
 
+#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
+#define INIT_MEMBLOCK_MEMORY_REGIONS	INIT_MEMBLOCK_REGIONS
+#endif
+
 /**
  * DOC: memblock overview
  *
@@ -55,9 +59,9 @@
  * the allocator metadata. The "memory" and "reserved" types are nicely
  * wrapped with struct memblock. This structure is statically
  * initialized at build time. The region arrays are initially sized to
- * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
- * for "reserved". The region array for "physmem" is initially sized to
- * %INIT_PHYSMEM_REGIONS.
+ * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
+ * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
+ * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
  * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
@@ -102,7 +106,7 @@ unsigned long min_low_pfn;
 unsigned long max_pfn;
 unsigned long long max_possible_pfn;
 
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
@@ -111,7 +115,7 @@ static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS
 struct memblock memblock __initdata_memblock = {
 	.memory.regions		= memblock_memory_init_regions,
 	.memory.cnt		= 1,	/* empty dummy entry */
-	.memory.max		= INIT_MEMBLOCK_REGIONS,
+	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
 	.memory.name		= "memory",
 
 	.reserved.regions	= memblock_reserved_init_regions,
......
@@ -3116,12 +3116,23 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
 {
 	struct obj_cgroup *objcg;
 	unsigned int nr_pages = 1 << order;
+#ifdef CONFIG_ASCEND_FEATURES
+	struct mem_cgroup *memcg;
+#endif
 
 	if (!PageMemcgKmem(page))
 		return;
 
 	objcg = __page_objcg(page);
 	obj_cgroup_uncharge_pages(objcg, nr_pages);
+
+#ifdef CONFIG_ASCEND_FEATURES
+	memcg = get_mem_cgroup_from_objcg(objcg);
+	if (!mem_cgroup_is_root(memcg))
+		memcg_oom_recover(memcg);
+	css_put(&memcg->css);
+#endif
+
 	page->memcg_data = 0;
 	obj_cgroup_put(objcg);
 }
......
@@ -1091,6 +1091,7 @@ static void check_panic_on_oom(struct oom_control *oc)
 	if (is_sysrq_oom(oc))
 		return;
 	dump_header(oc, NULL);
+	oom_type_notifier_call(0, oc);
 	panic("Out of memory: %s panic_on_oom is enabled\n",
 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 }
......