Commit 877fa656, authored by Tong Tiangen, committed by Zheng Zengkai

arm64: add dump_user_range() to machine check safe

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5GB28
CVE: NA

-------------------------------

In dump_user_range(), the data of the user process is dumped to the
core file. When a hardware memory error is encountered during the dump,
only the affected process is involved, so killing the user process and
isolating the user page with the hardware memory error is a more
reasonable choice than a kernel panic.

The typical usage scenario of dump_user_range() is coredump. Writing
the coredump file to a filesystem depends on that filesystem's
write_iter implementation. This patch only supports the two typical fs
write functions (_copy_from_iter/iov_iter_copy_from_user_atomic) that
are used by ext4/tmpfs/pipefs.
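
In condensed form, the flow added by this patch is: dump_user_range()
marks the task with PF_COREDUMP_MCS around dump_emit(), and the
in-kernel (kvec) copy step of the iov_iter helpers switches from
memcpy() to copy_mc_to_kernel() when that flag is set, so a hardware
memory error during the copy affects only that process instead of
triggering a kernel panic. A minimal sketch of the pattern, pieced
together from the hunks below (surrounding context and error handling
are omitted):

	/* Around the per-page dump in dump_user_range(): */
	current->flags |= PF_COREDUMP_MCS;
	stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
	current->flags &= ~PF_COREDUMP_MCS;

	/* In the iov_iter copy helpers, the kvec memcpy() becomes: */
	static void *memcpy_iter(void *to, const void *from, __kernel_size_t size)
	{
		if (IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) &&
		    (current->flags & PF_COREDUMP_MCS))
			/* copy_mc_to_kernel() returns bytes left uncopied on error. */
			return (void *)copy_mc_to_kernel(to, from, size);
		return memcpy(to, from, size);
	}
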
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
Parent 7aaf7c57
@@ -899,7 +899,9 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
 		if (page) {
 			void *kaddr = kmap(page);
 
+			current->flags |= PF_COREDUMP_MCS;
 			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
+			current->flags &= ~PF_COREDUMP_MCS;
 			kunmap(page);
 			put_page(page);
 		} else {
......
@@ -1607,6 +1607,7 @@ extern struct pid *cad_pid;
 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
 #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
+#define PF_COREDUMP_MCS		0x01000000	/* Task coredump support machine check safe */
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
 #define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation request will have _GFP_MOVABLE cleared */
......
@@ -764,6 +764,14 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 #endif /* CONFIG_ARCH_HAS_COPY_MC */
 
+static void *memcpy_iter(void *to, const void *from, __kernel_size_t size)
+{
+	if (IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && current->flags & PF_COREDUMP_MCS)
+		return (void *)copy_mc_to_kernel(to, from, size);
+	else
+		return memcpy(to, from, size);
+}
+
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
@@ -777,7 +785,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+		memcpy_iter((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -1013,7 +1021,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
-		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+		memcpy_iter((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 	kunmap_atomic(kaddr);
 	return bytes;
......