Commit db695c05 authored by Russell King

ARM: remove user cmpxchg syscall

Mark Brand reports that a NEEDS_SYSCALL_FOR_CMPXCHG enabled kernel would
open a security hole in the ghost syscall used to implement cmpxchg, as
it fails to validate the user pointer.
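The removed NR(cmpxchg) handler (see the traps.c hunk below) dereferences the
address passed in regs->ARM_r2 after walking the page tables, but never checks
that the address is a user-space pointer at all.  As an illustration only, the
kind of guard it is missing would look roughly like the following, assuming the
access_ok()/VERIFY_WRITE interface of this kernel generation;
cmpxchg_user_ptr_ok() is a hypothetical helper, not part of the patch:

#include <linux/uaccess.h>	/* access_ok() */

/*
 * Sketch of the missing validation: only accept a word-aligned address
 * that lies entirely within the caller's user address range.  The removed
 * handler performed no such check before reading and writing through the
 * pointer supplied in r2.
 */
static int cmpxchg_user_ptr_ok(unsigned long addr)
{
	if (addr & 3)		/* cmpxchg operates on a whole word */
		return 0;
	return access_ok(VERIFY_WRITE, (void __user *)addr,
			 sizeof(unsigned long));
}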

However, in order for this option to be enabled, you'd need to be
building a pre-ARMv6 kernel with SMP support.  The only system known to
fit that description is an early ARM SMP FPGA implementation based on
the ARM926T.

In any case, the Kconfig does not allow SMP to be enabled for pre-ARMv6
systems.

Moreover, even if NEEDS_SYSCALL_FOR_CMPXCHG were to be enabled, the
kernel would not build as __ARM_NR_cmpxchg64 is not defined.

The simple answer is to remove the buggy code.
Reported-by: Mark Brand <markbrand@google.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent 6f56a68d
@@ -21,13 +21,6 @@
  */
 #define __NR_syscalls  (388)
 
-/*
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence. Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg	(__ARM_NR_BASE+0x00fff0)
-
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
......@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
.endm
.macro kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
!defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
@@ -859,20 +858,7 @@ __kuser_helper_start:
 __kuser_cmpxchg64:				@ 0xffff0f60
 
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-	/*
-	 * Poor you. No fast solution possible...
-	 * The kernel itself must perform the operation.
-	 * A special ghost syscall is used for that (see traps.c).
-	 */
-	stmfd	sp!, {r7, lr}
-	ldr	r7, 1f			@ it's 20 bits
-	swi	__ARM_NR_cmpxchg64
-	ldmfd	sp!, {r7, pc}
-1:	.word	__ARM_NR_cmpxchg64
-
-#elif defined(CONFIG_CPU_32v6K)
+#if defined(CONFIG_CPU_32v6K)
 
 	stmfd	sp!, {r4, r5, r6, r7}
 	ldrd	r4, r5, [r0]			@ load old val
@@ -948,20 +934,7 @@ __kuser_memory_barrier:	@ 0xffff0fa0
 __kuser_cmpxchg:				@ 0xffff0fc0
 
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-	/*
-	 * Poor you. No fast solution possible...
-	 * The kernel itself must perform the operation.
-	 * A special ghost syscall is used for that (see traps.c).
-	 */
-	stmfd	sp!, {r7, lr}
-	ldr	r7, 1f			@ it's 20 bits
-	swi	__ARM_NR_cmpxchg
-	ldmfd	sp!, {r7, pc}
-1:	.word	__ARM_NR_cmpxchg
-
-#elif __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6
 
 #ifdef CONFIG_MMU
@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		set_tls(regs->ARM_r0);
 		return 0;
 
-#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-	/*
-	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
-	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
-	 * happened. Also set the user C flag accordingly.
-	 * If access permissions have to be fixed up then non-zero is
-	 * returned and the operation has to be re-attempted.
-	 *
-	 * *NOTE*: This is a ghost syscall private to the kernel. Only the
-	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
-	 * existence. Don't ever use this from user code.
-	 */
-	case NR(cmpxchg):
-	for (;;) {
-		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
-					 struct pt_regs *regs);
-		unsigned long val;
-		unsigned long addr = regs->ARM_r2;
-		struct mm_struct *mm = current->mm;
-		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
-		spinlock_t *ptl;
-
-		regs->ARM_cpsr &= ~PSR_C_BIT;
-		down_read(&mm->mmap_sem);
-		pgd = pgd_offset(mm, addr);
-		if (!pgd_present(*pgd))
-			goto bad_access;
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_present(*pmd))
-			goto bad_access;
-		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-		if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
-			pte_unmap_unlock(pte, ptl);
-			goto bad_access;
-		}
-		val = *(unsigned long *)addr;
-		val -= regs->ARM_r0;
-		if (val == 0) {
-			*(unsigned long *)addr = regs->ARM_r1;
-			regs->ARM_cpsr |= PSR_C_BIT;
-		}
-		pte_unmap_unlock(pte, ptl);
-		up_read(&mm->mmap_sem);
-		return val;
-
-		bad_access:
-		up_read(&mm->mmap_sem);
-		/* simulate a write access fault */
-		do_DataAbort(addr, 15 + (1 << 11), regs);
-	}
-#endif
-
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL. This
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
 config CPU_32v3
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
 	  a few prototypes like that in existence) and therefore access to
 	  that required register must be emulated.
 
-config NEEDS_SYSCALL_FOR_CMPXCHG
-	bool
-	select NEED_KUSER_HELPERS
-	help
-	  SMP on a pre-ARMv6 processor? Well OK then.
-	  Forget about fast user space cmpxchg support.
-	  It is just not possible.
-
 config NEED_KUSER_HELPERS
 	bool
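For user space, the supported way to get an atomic compare-and-swap on these
kernels remains the cmpxchg kuser helper at 0xffff0fc0 (kept behind
CONFIG_KUSER_HELPERS).  A minimal usage sketch, roughly following the calling
convention described in Documentation/arm/kernel_user_helpers.txt; atomic_add()
here is just an illustrative caller:

typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

/* Add val to *ptr atomically; retry until the compare-and-swap wins. */
int atomic_add(volatile int *ptr, int val)
{
	int old, new;

	do {
		old = *ptr;
		new = old + val;
	} while (__kuser_cmpxchg(old, new, ptr));	/* returns 0 on success */

	return new;
}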