Commit 12c7518a, authored by Kefeng Wang, committed by Zheng Zengkai

arm64: memmap: fix request_resource return error

hulk inclusion
category: bugfix
bugzilla: 187483, https://gitee.com/openeuler/kernel/issues/I5MH9N
CVE: NA

--------------------------------

With memmap=nn[KMG]$ss[KMG] support, a resource is requested so that the
reserved memory shows up in /proc/iomem. However, memblock_mark_memmap(),
which calls memblock_setclr_flag(), splits the memblock region, and
request_resource() can then return -EBUSY when it is passed an unaligned
address.

Use memblock_reserve() directly, drop the memblock_setclr_flag() call, and
print an error message if request_resource() in request_memmap_resources()
returns an error code.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent e50cbf6c
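For context, request_resource() only succeeds when the child range lies
entirely inside its parent and does not clash with an existing sibling, which
is why the new loop below skips regions that fall outside the parent "System
RAM" window. The small userspace sketch that follows is not part of the patch;
the addresses are made up for illustration, and it only models that
containment check:

#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive, like struct resource */
};

/* Mirrors the bounds check added in request_memmap_resources(). */
static int fits_in_parent(const struct range *parent, uint64_t base, uint64_t size)
{
	return size && base >= parent->start && base + size - 1 <= parent->end;
}

int main(void)
{
	struct range sysram = { 0x80000000ULL, 0xbfffffffULL };	/* made-up parent */
	struct { uint64_t base, size; } memmap[] = {
		{ 0x90000000ULL, 0x01000000ULL },	/* fully inside: would be requested */
		{ 0xbff00000ULL, 0x00200000ULL },	/* spills past the end: skipped */
	};

	for (unsigned int i = 0; i < sizeof(memmap) / sizeof(memmap[0]); i++)
		printf("[%#llx, %#llx] -> %s\n",
		       (unsigned long long)memmap[i].base,
		       (unsigned long long)(memmap[i].base + memmap[i].size - 1),
		       fits_in_parent(&sysram, memmap[i].base, memmap[i].size) ?
				"request as \"memmap reserved\"" : "skip");
	return 0;
}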
@@ -219,17 +219,34 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
 static void __init request_memmap_resources(struct resource *res)
 {
 	struct resource *memmap_res;
+	phys_addr_t base, size;
+	int i;
 
-	memmap_res = memblock_alloc(sizeof(*memmap_res), SMP_CACHE_BYTES);
-	if (!memmap_res)
-		panic("%s: Failed to allocate memmap_res\n", __func__);
-	memmap_res->name = "memmap reserved";
-	memmap_res->flags = IORESOURCE_MEM;
-	memmap_res->start = res->start;
-	memmap_res->end = res->end;
-	request_resource(res, memmap_res);
+	for (i = 0; i < MAX_RES_REGIONS; i++) {
+		base = mbk_memmap_regions[i].base;
+		size = mbk_memmap_regions[i].size;
+		if (!size)
+			continue;
+
+		if ((base < res->start) || (base + size - 1 > res->end))
+			continue;
+
+		memmap_res = memblock_alloc(sizeof(*memmap_res), SMP_CACHE_BYTES);
+		if (!memmap_res)
+			panic("%s: Failed to allocate memmap_res\n", __func__);
+
+		memmap_res->name = "memmap reserved";
+		memmap_res->flags = IORESOURCE_MEM;
+		memmap_res->start = base;
+		memmap_res->end = base + size - 1;
+		if (request_resource(res, memmap_res)) {
+			pr_warn("memmap reserve: [%llx, %llx] request resource fail\n",
+				memmap_res->start, memmap_res->end);
+			memblock_free_early(virt_to_phys(memmap_res),
+					    sizeof(*memmap_res));
+		}
+	}
 }
 
 static void __init request_standard_resources(void)
@@ -270,8 +287,8 @@ static void __init request_standard_resources(void)
 		if (kernel_data.start >= res->start &&
 		    kernel_data.end <= res->end)
 			request_resource(res, &kernel_data);
-		if (memblock_is_memmap(region))
-			request_memmap_resources(res);
+		request_memmap_resources(res);
 
 #ifdef CONFIG_KEXEC_CORE
 		/* Userspace will find "Crash kernel" region in /proc/iomem. */
@@ -297,10 +297,8 @@ static void __init fdt_enforce_memory_region(void)
 		memblock_add(usable_rgns[1].base, usable_rgns[1].size);
 }
 
-#define MAX_RES_REGIONS 32
-
-static struct memblock_region mbk_memmap_regions[MAX_RES_REGIONS] __initdata_memblock;
-static int mbk_memmap_cnt __initdata;
+struct memblock_region mbk_memmap_regions[MAX_RES_REGIONS] __initdata_memblock;
+int mbk_memmap_cnt __initdata;
 
 static void __init setup_mbk_memmap_regions(phys_addr_t base, phys_addr_t size)
 {
@@ -317,6 +315,7 @@ static void __init setup_mbk_memmap_regions(phys_addr_t base, phys_addr_t size)
 static void __init reserve_memmap_regions(void)
 {
 	phys_addr_t base, size;
+	const char *str;
 	int i;
 
 	for (i = 0; i < mbk_memmap_cnt; i++) {
@@ -324,26 +323,27 @@ static void __init reserve_memmap_regions(void)
 		base = mbk_memmap_regions[i].base;
 		size = mbk_memmap_regions[i].size;
 
 		if (!memblock_is_region_memory(base, size)) {
-			pr_warn("memmap reserve: 0x%08llx - 0x%08llx is not a memory region - ignore\n",
-				base, base + size);
-			continue;
+			str = "is not a memory region - ignore";
+			goto err;
 		}
 
 		if (memblock_is_region_reserved(base, size)) {
-			pr_warn("memmap reserve: 0x%08llx - 0x%08llx overlaps in-use memory region - ignore\n",
-				base, base + size);
-			continue;
+			str = "overlaps in-use memory region - ignore";
+			goto err;
 		}
 
 		if (memblock_reserve(base, size)) {
-			pr_warn("memmap reserve: 0x%08llx - 0x%08llx failed\n",
-				base, base + size);
-			continue;
+			str = "failed";
+			goto err;
 		}
 
 		pr_info("memmap reserved: 0x%08llx - 0x%08llx (%lld MB)",
-			base, base + size, size >> 20);
-		memblock_mark_memmap(base, size);
+			base, base + size - 1, size >> 20);
+		continue;
+err:
+		mbk_memmap_regions[i].size = 0;
+		pr_warn("memmap reserve: 0x%08llx - 0x%08llx %s\n",
+			base, base + size - 1, str);
 	}
 }
@@ -22,4 +22,8 @@ static inline void __init reserve_quick_kexec(void) {}
 static inline void __init request_quick_kexec_res(struct resource *res) {}
 #endif
 
+#define MAX_RES_REGIONS 32
+
+extern struct memblock_region mbk_memmap_regions[MAX_RES_REGIONS];
+extern int mbk_memmap_cnt;
 #endif /* ifndef _ARM64_MM_INTERNAL_H */