Commit c2172ce2 authored by Russell King

Merge branch 'uaccess' into fixes

...@@ -1700,6 +1700,21 @@ config HIGHPTE
consumed by page tables. Setting this option will allow
user-space 2nd level page tables to reside in high memory.
config CPU_SW_DOMAIN_PAN
bool "Enable use of CPU domains to implement privileged no-access"
depends on MMU && !ARM_LPAE
default y
help
Increase kernel security by ensuring that normal kernel accesses
are unable to access userspace addresses. This can help prevent
use-after-free bugs becoming an exploitable privilege escalation
by ensuring that magic values (such as LIST_POISON) will always
fault when dereferenced.
CPUs with low-vector mappings use a best-efforts implementation.
Their lower 1MB needs to remain accessible for the vectors, but
the remainder of userspace will become appropriately inaccessible.
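As an illustration of the poison-value case mentioned above (a sketch using assumed constants, not part of this patch): the list poison pointers are low, user-range addresses, so with CPU_SW_DOMAIN_PAN active a kernel dereference of a freed list node faults immediately instead of silently reading user-mapped memory.
/* Assumed default values from include/linux/poison.h and a 3G/1G split;
 * both lie below TASK_SIZE, i.e. inside the user domain that the DACR now
 * marks no-access while the kernel is outside a uaccess region. */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)
#define TASK_SIZE     0xbf000000UL
static int poison_is_user_address(const void *p)
{
	return (unsigned long)p < TASK_SIZE;	/* true for both poison values */
}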
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
depends on PERF_EVENTS
......
...@@ -445,6 +445,48 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_ENABLE
mcr p15, 0, \tmp, c3, c0, 0
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
mrc p15, 0, \tmp, c3, c0, 0
str \tmp, [sp, #S_FRAME_SIZE]
#endif
.endm
.macro uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
ldr r0, [sp, #S_FRAME_SIZE]
mcr p15, 0, r0, c3, c0, 0
#endif
.endm
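A C-level rendering of what these macros do may help when reading the entry paths below (a sketch assuming CONFIG_CPU_SW_DOMAIN_PAN=y; it is not code from this commit). Note that uaccess_save stores the old DACR in the word just past the saved pt_regs (offset S_FRAME_SIZE), which is why __show_regs later reads it as *(unsigned int *)(regs + 1).
/* Sketch only: the macros rewrite the Domain Access Control Register
 * (CP15 c3) with one of the precomputed DACR_UACCESS_* values defined in
 * asm/domain.h below, then issue an ISB so the new permissions apply
 * before the next explicit memory access. */
static inline void uaccess_disable_sketch(void)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0"	/* write DACR */
		     : : "r" (DACR_UACCESS_DISABLE) : "memory");
	isb();
}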
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
......
...@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#include <asm/thread_info.h>
#endif
/*
...@@ -34,15 +35,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
#define DOMAIN_VECTORS 3
/*
* Domain types
...@@ -55,30 +55,65 @@
#define DOMAIN_MANAGER 1
#endif
#define domain_val(dom,type) ((type) << (2*(dom)))
#define domain_mask(dom) ((3) << (2 * (dom)))
#define domain_val(dom,type) ((type) << (2 * (dom)))
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#else
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#endif
#define __DACR_DEFAULT \
domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
#define DACR_UACCESS_DISABLE \
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
#define DACR_UACCESS_ENABLE \
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
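For concreteness, here is a worked expansion of these constants (an illustration, not part of the patch), assuming the non-CONFIG_IO_36 domain numbers above and the architectural access types DOMAIN_NOACCESS = 0 and DOMAIN_CLIENT = 1:
/* Standalone sketch: each domain owns a 2-bit field in the DACR, so
 * __DACR_DEFAULT is 0x51 (kernel, IO and vectors as client) and the
 * user field toggles between no-access and client. */
#include <stdio.h>
#define domain_val(dom, type) ((type) << (2 * (dom)))
int main(void)
{
	enum { DOMAIN_KERNEL, DOMAIN_USER, DOMAIN_IO, DOMAIN_VECTORS };
	enum { DOMAIN_NOACCESS = 0, DOMAIN_CLIENT = 1 };
	unsigned int dflt = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) |
			    domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
			    domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT);

	/* Prints: uaccess disabled: 0x51, uaccess enabled: 0x55 */
	printf("uaccess disabled: %#x, uaccess enabled: %#x\n",
	       dflt | domain_val(DOMAIN_USER, DOMAIN_NOACCESS),
	       dflt | domain_val(DOMAIN_USER, DOMAIN_CLIENT));
	return 0;
}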
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_USE_DOMAINS
static inline unsigned int get_domain(void)
{
unsigned int domain;
asm(
"mrc p15, 0, %0, c3, c0 @ get domain"
: "=r" (domain)
: "m" (current_thread_info()->cpu_domain));
return domain;
}
static inline void set_domain(unsigned val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
: : "r" (val));
: : "r" (val) : "memory");
isb();
}
#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
struct thread_info *thread = current_thread_info(); \
unsigned int domain = thread->cpu_domain; \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \
thread->cpu_domain = domain | domain_val(dom, type); \
set_domain(thread->cpu_domain); \
unsigned int domain = get_domain(); \
domain &= ~domain_mask(dom); \
domain = domain | domain_val(dom, type); \
set_domain(domain); \
} while (0)
#else
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
......
...@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
({ \
unsigned int __ua_flags; \
smp_mb(); \
prefetchw(uaddr); \
__ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
...@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
: "cc", "memory"); \
uaccess_restore(__ua_flags); \
})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
unsigned int __ua_flags;
int ret;
u32 val;
...@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
...@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
...@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
({ \
unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
...@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
: "cc", "memory"); \
uaccess_restore(__ua_flags); \
})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
unsigned int __ua_flags;
int ret = 0;
u32 val;
...@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
preempt_disable();
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
...@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
uaccess_restore(__ua_flags);
*uval = val;
preempt_enable();
......
...@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
......
...@@ -25,7 +25,6 @@
struct task_struct;
#include <asm/types.h>
#include <asm/domain.h>
typedef unsigned long mm_segment_t;
...@@ -74,9 +73,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
}
#define init_thread_info (init_thread_union.thread_info)
......
...@@ -49,6 +49,35 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
* These two functions allow hooking accesses to userspace to increase
* system integrity by ensuring that the kernel cannot inadvertently
* perform such accesses (eg, via list poison values) which could then
* be exploited for privilege escalation.
*/
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
#else
return 0;
#endif
}
static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
set_domain(flags);
#endif
}
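A usage sketch (hypothetical caller, not from this commit) showing why the helper returns the previous DACR value rather than simply toggling access: uaccess regions can nest, for example when an interrupt that walks user stacks arrives inside copy_to_user(), and each region must put back whatever state it found.
/* Illustration only; the function and variable names here are made up. */
static void nested_uaccess_example(int __user *a, int __user *b)
{
	unsigned int outer = uaccess_save_and_enable();	/* was "no access" */

	(void)__put_user(1, a);
	{
		unsigned int inner = uaccess_save_and_enable();	/* already open */
		(void)__put_user(2, b);
		uaccess_restore(inner);	/* restores "enabled", not "disabled" */
	}
	uaccess_restore(outer);		/* back to privileged no-access */
}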
/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
...@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
...@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
})
...@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
...@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
break; \
default: __e = __put_user_bad(); break; \
} \
uaccess_restore(__ua_flags); \
__e; \
})
...@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
...@@ -381,9 +417,11 @@ do { \
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
...@@ -391,6 +429,7 @@ do { \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
uaccess_restore(__ua_flags); \
} while (0)
#define __put_user_asm_byte(x, __pu_addr, err) \
...@@ -474,11 +513,46 @@ do { \
#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
}
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
}
extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);
static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
unsigned int __ua_flags = uaccess_save_and_enable();
n = arm_clear_user(addr, n);
uaccess_restore(__ua_flags);
return n;
}
#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
...@@ -511,6 +585,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
......
...@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(arm_copy_from_user);
EXPORT_SYMBOL(arm_copy_to_user);
EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
......
...@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif
.macro svc_entry, stack_hole=0, trace=1
.macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
...@@ -167,7 +167,7 @@
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
...@@ -185,6 +185,11 @@
@
stmia r7, {r2 - r6}
uaccess_save r0
.if \uaccess
uaccess_disable r0
.endif
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
...@@ -194,7 +199,7 @@
.align 5
__dabt_svc:
svc_entry
svc_entry uaccess=0
mov r2, sp
dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
...@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
.macro usr_entry, trace=1
.macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
...@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
.if \uaccess
uaccess_disable ip
.endif
@ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
...@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
.align 5
__dabt_usr:
usr_entry
usr_entry uaccess=0
kuser_cmpxchg_check
mov r2, sp
dabt_helper
...@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
.align 5
__und_usr:
usr_entry
usr_entry uaccess=0
mov r2, r4
mov r3, r5
...@@ -484,6 +493,8 @@ __und_usr:
1: ldrt r0, [r4]
ARM_BE8(rev r0, r0) @ little endian instruction
uaccess_disable ip
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
...@@ -518,9 +529,10 @@ __und_usr_thumb:
2: ldrht r5, [r4]
ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_fault_16 @ 16bit undefined instruction
blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2]
ARM_BE8(rev16 r0, r0) @ little endian instruction
uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
...@@ -715,6 +727,8 @@ ENDPROC(no_fp)
__und_usr_fault_32:
mov r1, #4
b 1f
__und_usr_fault_16_pan:
uaccess_disable ip
__und_usr_fault_16:
mov r1, #2
1: mov r0, sp
...@@ -770,6 +784,8 @@ ENTRY(__switch_to)
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
mrc p15, 0, r6, c3, c0, 0 @ Get domain register
str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
......
...@@ -174,6 +174,8 @@ ENTRY(vector_swi)
USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif
uaccess_disable tbl
adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
......
...@@ -196,7 +196,7 @@
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
...@@ -215,6 +215,10 @@
blne trace_hardirqs_off
#endif
.endif
uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
...@@ -222,6 +226,20 @@
strex r1, r2, [r0] @ clear the exclusive monitor
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#else
@ Thumb mode SVC restore
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
ldr lr, [sp], #4
rfeia sp!
#endif
.endm
@
...@@ -241,6 +259,9 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
@ clobber state restored below)
...@@ -250,9 +271,27 @@
msr spsr_cxsf, r9
ldr r0, [r0, #S_R0]
ldmia r8, {pc}^
#else
@ Thumb mode restore
add r0, sp, #S_R2
ldr lr, [sp, #S_LR]
ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
@ clobber state restored below)
ldmia r0, {r2 - r12}
mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
msr cpsr_c, r1
sub r0, #S_R2
add r8, r0, #S_PC
ldmia r0, {r0 - r1}
rfeia r8
#endif
.endm
.macro restore_user_regs, fast = 0, offset = 0
uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
...@@ -270,72 +309,16 @@
@ after ldm {}^
add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@ Note that we don't need to do clrex here as clearing the local
@ monitor is part of the exception entry and exit sequence.
.endm
#else /* CONFIG_THUMB2_KERNEL */
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
@ The parent context IRQs must have been enabled to get here in
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
.else
@ IRQs off again before pulling preserved data off the stack
disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
tst \rpsr, #PSR_I_BIT
bleq trace_hardirqs_on
tst \rpsr, #PSR_I_BIT
blne trace_hardirqs_off
#endif
.endif
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
ldr lr, [sp], #4
rfeia sp!
.endm
@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ For full details see non-Thumb implementation above.
@
.macro svc_exit_via_fiq
add r0, sp, #S_R2
ldr lr, [sp, #S_LR]
ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
@ clobber state restored below)
ldmia r0, {r2 - r12}
mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
msr cpsr_c, r1
sub r0, #S_R2
add r8, r0, #S_PC
ldmia r0, {r0 - r1}
rfeia r8
.endm
#ifdef CONFIG_CPU_V7M
/*
* Note we don't need to do clrex here as clearing the local monitor is
* part of each exception entry and exit sequence.
*/
.macro restore_user_regs, fast = 0, offset = 0
.if \offset
add sp, #\offset
.endif
v7m_exception_slow_exit ret_r0 = \fast
#else
@ Thumb mode restore
.endm
#else /* ifdef CONFIG_CPU_V7M */
.macro restore_user_regs, fast = 0, offset = 0
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
...@@ -353,9 +336,8 @@
.endif
add sp, sp, #S_FRAME_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
.endm
#endif /* ifdef CONFIG_CPU_V7M / else */
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
/*
* Context tracking subsystem. Used to instrument transitions
......
...@@ -464,10 +464,7 @@ __enable_mmu:
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mov r5, #DACR_INIT
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
......
...@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
buf[4] = '\0';
#ifndef CONFIG_CPU_V7M
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)],
get_fs() == get_ds() ? "kernel" : "user");
{
unsigned int domain = get_domain();
const char *segment;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
* mode, we don't save the DACR, so lets use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
if (user_mode(regs))
domain = DACR_UACCESS_ENABLE;
else
domain = *(unsigned int *)(regs + 1);
#endif
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
else if (get_fs() == get_ds())
segment = "kernel";
else
segment = "user";
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)], segment);
}
#else
printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif
...@@ -146,10 +170,9 @@ void __show_regs(struct pt_regs *regs)
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
unsigned int transbase, dac;
asm("mrc p15, 0, %0, c2, c0\n\t"
"mrc p15, 0, %1, c3, c0\n"
: "=r" (transbase), "=r" (dac));
unsigned int transbase, dac = get_domain();
asm("mrc p15, 0, %0, c2, c0\n\t"
: "=r" (transbase));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac);
}
...@@ -210,6 +233,16 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
#ifdef CONFIG_CPU_USE_DOMAINS
/*
* Copy the initial value of the domain access control register
* from the current thread: thread->addr_limit will have been
* copied from the current thread via setup_thread_stack() in
* kernel/fork.c
*/
thread->cpu_domain = get_domain();
#endif
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
......
...@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
while (1) {
unsigned long temp;
unsigned int __ua_flags;
__ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
......
...@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
* on V7-M there is no need to copy the vector table to a dedicated
......
...@@ -12,14 +12,14 @@
.text
/* Prototype: int __clear_user(void *addr, size_t sz)
/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
ENTRY(__clear_user_std)
WEAK(__clear_user)
WEAK(arm_clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
...@@ -44,7 +44,7 @@ WEAK(__clear_user)
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
ENDPROC(__clear_user)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax"
......
...@@ -17,7 +17,7 @@
/*
* Prototype:
*
* size_t __copy_from_user(void *to, const void *from, size_t n)
* size_t arm_copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
...@@ -89,11 +89,11 @@
.text
ENTRY(__copy_from_user)
ENTRY(arm_copy_from_user)
#include "copy_template.S"
ENDPROC(__copy_from_user)
ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
......
...@@ -17,7 +17,7 @@
/*
* Prototype:
*
* size_t __copy_to_user(void *to, const void *from, size_t n)
* size_t arm_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
...@@ -93,11 +93,11 @@
.text
ENTRY(__copy_to_user_std)
WEAK(__copy_to_user)
WEAK(arm_copy_to_user)
#include "copy_template.S"
ENDPROC(__copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax"
......
...@@ -17,6 +17,19 @@
.text
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
uaccess_enable ip
.endm
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
#else
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
...@@ -24,6 +37,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
#endif
.macro load1b, reg1
ldrusr \reg1, r0, 1
......
...@@ -136,7 +136,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
/*
* This test is stubbed out of the main function above to keep
...@@ -190,7 +190,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
return n;
}
unsigned long __clear_user(void __user *addr, unsigned long n)
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
/* See rationale for this in __copy_to_user() above. */
if (n < 64)
......
...@@ -19,6 +19,7 @@ ENTRY(v4_early_abort) ...@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
......
...@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bits 11 of FSR
do_ldrd_abort tmp=ip, insn=r3
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
b do_DataAbort
...@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
do_ldrd_abort tmp=ip, insn=r3
uaccess_disable ip @ disable userspace access
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
b do_DataAbort
...@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136?
bne do_DataAbort
bne 1f
tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb?
bne do_DataAbort
bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3)
do_ldrd_abort tmp=ip, insn=r3
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
#endif
1: uaccess_disable ip @ disable userspace access
b do_DataAbort
...@@ -15,6 +15,7 @@
ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
uaccess_disable ip @ disable userspace access
/*
* V6 code adjusts the returned DFSR.
......
...@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif
bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction
uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
...@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12
......
...@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT
beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction
uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
...@@ -29,12 +30,9 @@ not_thumb:
* [7:4] == 1101
* [20] == 0
*/
.macro do_ldrd_abort, tmp, insn
tst \insn, #0x0e100000 @ [27:25,20] == 0
bne not_ldrd
and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
cmp \tmp, #0x000000d0
beq do_DataAbort
not_ldrd:
.macro teq_ldrd, tmp, insn
mov \tmp, #0x0e100000
orr \tmp, #0x000000f0
and \tmp, \insn, \tmp
teq \tmp, #0x000000d0
.endm
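For reference, the single masked compare the rewritten macro performs can be written in C as follows (illustrative only; the mask and value follow from the encoding notes above):
/* Sketch: true when the aborted ARM instruction is an LDRD, i.e. bits
 * [27:25] == 000, bit [20] == 0 and bits [7:4] == 1101, which is exactly
 * what teq_ldrd tests with mask 0x0e1000f0 against 0x000000d0. */
static int insn_is_ldrd(unsigned int insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}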
...@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER,
.domain = DOMAIN_VECTORS,
},
[MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER,
.domain = DOMAIN_VECTORS,
},
[MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
......
...@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (!new_pte)
goto no_pte;
#ifndef CONFIG_ARM_LPAE
/*
* Modify the PTE pointer to have the correct domain. This
* needs to be the vectors domain to avoid the low vectors
* being unmapped.
*/
pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif
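A small worked illustration (not from the patch) of what the two pmd_val() lines do, given PMD_DOMAIN(x) = x << 5 and PMD_DOMAIN_MASK = PMD_DOMAIN(0x0f) from the pgtable hwdef header above:
/* Sketch: clear the 4-bit domain field (bits [8:5]) of the level-1 entry
 * and stamp in DOMAIN_VECTORS (3), so the first user PMD covering the low
 * vectors page is checked against the vectors domain, not DOMAIN_USER. */
static unsigned int pmd_with_vectors_domain(unsigned int pmdval)
{
	return (pmdval & ~(0x0fu << 5)) | (3u << 5);	/* DOMAIN_VECTORS == 3 */
}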
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
......
...@@ -95,9 +95,10 @@ emulate:
reteq r4 @ no, return failure
next:
uaccess_enable r3
.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@ increment PC
uaccess_disable r3
and r2, r6, #0x0F000000 @ test for FP insns
teq r2, #0x0C000000
teqne r2, #0x0D000000
......
...@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
mov r1, r2
mov r2, r3
ldr r3, [sp, #8]
/*
* Privcmd calls are issued by the userspace. We need to allow the
* kernel to access the userspace memory before issuing the hypercall.
*/
uaccess_enable r4
/* r4 is loaded now as we use it as scratch register before */
ldr r4, [sp, #4]
__HVC(XEN_IMM)
/*
* Disable userspace access from kernel. This is fine to do it
* unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
* called before.
*/
uaccess_disable r4
ldm sp!, {r4}
ret lr
ENDPROC(privcmd_call);