Commit cfa72543 authored by Andrew Donnellan, committed by Yang Yingliang

powerpc: Fix __clear_user() with KUAP enabled

stable inclusion
from linux-4.19.159
commit 92e7ec289b955384fba88953584562ce43164fa7
CVE: CVE-2020-4788

--------------------------------

commit 61e3acd8 upstream.

The KUAP implementation adds calls in clear_user() to enable and
disable access to userspace memory. However, it doesn't add these to
__clear_user(), which is used in the ptrace regset code.

As there's only one direct user of __clear_user() (the regset code),
and the time taken to set the AMR for KUAP purposes is going to
dominate the cost of a quick access_ok(), there's not much point
having a separate path.

Rename __clear_user() to __arch_clear_user(), and make __clear_user()
just call clear_user().
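
To illustrate the resulting call structure, here is a minimal user-space
sketch (not kernel code): the bool flag stands in for the AMR state, the
assert for a KUAP fault, and the kernel types are simplified (size_t for
unsigned long, plain pointers for void __user *). Only clear_user(),
__clear_user(), __arch_clear_user() and the allow/prevent helpers mirror
names in the diff below; everything else is illustrative.

  #include <assert.h>
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  static bool write_allowed; /* stand-in for the AMR state */

  static void allow_write_to_user(void *addr, size_t size)
  {
  	(void)addr; (void)size;
  	write_allowed = true;
  }

  static void prevent_write_to_user(void *addr, size_t size)
  {
  	(void)addr; (void)size;
  	write_allowed = false;
  }

  /* Models the asm routine: assumes the caller already opened KUAP. */
  static size_t __arch_clear_user(void *addr, size_t size)
  {
  	assert(write_allowed); /* without the bracket, this would fault */
  	memset(addr, 0, size);
  	return 0; /* number of bytes NOT cleared */
  }

  static size_t clear_user(void *addr, size_t size)
  {
  	size_t ret = size;

  	allow_write_to_user(addr, size);
  	ret = __arch_clear_user(addr, size);
  	prevent_write_to_user(addr, size);
  	return ret;
  }

  /* After the patch, __clear_user() is a thin wrapper, so its one
   * caller (the ptrace regset code) gets the KUAP bracketing free. */
  static size_t __clear_user(void *addr, size_t size)
  {
  	return clear_user(addr, size);
  }

  int main(void)
  {
  	char buf[8] = "junkjun";

  	if (__clear_user(buf, sizeof(buf)) == 0)
  		printf("cleared, buf[0] = %d\n", buf[0]);
  	return 0;
  }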

Reported-by: syzbot+f25ecf4b2982d8c7a640@syzkaller-ppc64.appspotmail.com
Reported-by: Daniel Axtens <dja@axtens.net>
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Fixes: de78a9c4 ("powerpc: Add a framework for Kernel Userspace Access Protection")
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
[mpe: Use __arch_clear_user() for the asm version like arm64 & nds32]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191209132221.15328-1-ajd@linux.ibm.com
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 84c05e63
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -416,7 +416,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 	return ret;
 }
 
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
 {
@@ -424,12 +424,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
 	might_fault();
 	if (likely(access_ok(VERIFY_WRITE, addr, size))) {
 		allow_write_to_user(addr, size);
-		ret = __clear_user(addr, size);
+		ret = __arch_clear_user(addr, size);
 		prevent_write_to_user(addr, size);
 	}
 	return ret;
 }
 
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+	return clear_user(addr, size);
+}
+
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);
--- a/arch/powerpc/lib/string_32.S
+++ b/arch/powerpc/lib/string_32.S
@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
-_GLOBAL(__clear_user)
+_GLOBAL(__arch_clear_user)
 /*
  * Use dcbz on the complete cache lines in the destination
  * to set them to zero. This requires that the destination
@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
 	EX_TABLE(8b, 91b)
 	EX_TABLE(9b, 91b)
 
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -29,7 +29,7 @@ PPC64_CACHES:
 	.section	".text"
 
 /**
- * __clear_user: - Zero a block of memory in user space, with less checking.
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
  * @to:   Destination address, in user space.
  * @n:    Number of bytes to zero.
  *
@@ -70,7 +70,7 @@ err3;	stb	r0,0(r3)
 	mr	r3,r4
 	blr
 
-_GLOBAL_TOC(__clear_user)
+_GLOBAL_TOC(__arch_clear_user)
 	cmpdi	r4,32
 	neg	r6,r3
 	li	r0,0
@@ -193,4 +193,4 @@ err1;	dcbz	0,r3
 	cmpdi	r4,32
 	blt	.Lshort_clear
 	b	.Lmedium_clear
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)