Commit 9641c7cc authored by Russell King, committed by Russell King

[ARM] nommu: uaccess tweaks

MMUless systems have only one address space for all threads, so
both the usual access_ok() checks and the exception handling do
not make much sense.

Hence, discard the fixup and exception tables at link time, use
memcpy/memset for the user copy/clearing functions, and define
the permission check macros to be constants.

Some of this patch was derived from the equivalent patch by
Hyok S. Choi.
Signed-off-by: Hyok S. Choi <hyok.choi@samsung.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent 002547b4
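
Before the diff itself, a minimal standalone sketch (plain user-space C, not kernel code; the *_nommu helper names are invented for illustration) of what the nommu fallbacks below boil down to: with one flat address space a user copy cannot fault, so __copy_from_user/__clear_user reduce to (memcpy(...), 0) and (memset(...), 0), always reporting zero bytes left.

#include <stdio.h>
#include <string.h>

/* nommu fallback: single flat address space, a user copy cannot fault,
 * so it is just memcpy() and always reports 0 bytes left uncopied. */
static unsigned long copy_from_user_nommu(void *to, const void *from,
					  unsigned long n)
{
	return (memcpy(to, from, n), 0UL);
}

/* nommu fallback for clearing user memory: plain memset(), always "ok". */
static unsigned long clear_user_nommu(void *addr, unsigned long n)
{
	return (memset(addr, 0, n), 0UL);
}

int main(void)
{
	char src[8] = "abcdefg";
	char dst[8];
	unsigned long left;

	left = copy_from_user_nommu(dst, src, sizeof(dst));
	printf("uncopied: %lu, dst = \"%s\"\n", left, dst);

	left = clear_user_nommu(dst, sizeof(dst));
	printf("uncleared: %lu, dst[0] = %d\n", left, dst[0]);
	return 0;
}

The MMU build keeps the assembler versions backed by exception-table fixups, which is why the .fixup and __ex_table sections only need to be kept in the link when CONFIG_MMU is set.
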
@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
 	/* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
+#endif
 
 	/* crypto hash */
 EXPORT_SYMBOL(sha_transform);
......
@@ -80,6 +80,10 @@ SECTIONS
 		*(.exit.text)
 		*(.exit.data)
 		*(.exitcall.exit)
+#ifndef CONFIG_MMU
+		*(.fixup)
+		*(__ex_table)
+#endif
 	}
 
 	.text : {			/* Real text segment		*/
@@ -87,7 +91,9 @@ SECTIONS
 			*(.text)
 			SCHED_TEXT
 			LOCK_TEXT
+#ifdef CONFIG_MMU
 			*(.fixup)
+#endif
 			*(.gnu.warning)
 			*(.rodata)
 			*(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
 	 */
 	. = ALIGN(32);
 	__start___ex_table = .;
+#ifdef CONFIG_MMU
 	*(__ex_table)
+#endif
 	__stop___ex_table = .;
 
 	/*
......
@@ -6,28 +6,31 @@
 lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o   \
 		   csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-		   copy_page.o delay.o findbit.o memchr.o memcpy.o    \
+		   delay.o findbit.o memchr.o memcpy.o                \
 		   memmove.o memset.o memzero.o setbit.o              \
 		   strncpy_from_user.o strnlen_user.o                 \
 		   strchr.o strrchr.o                                 \
 		   testchangebit.o testclearbit.o testsetbit.o        \
-		   getuser.o putuser.o clear_user.o                   \
 		   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
 		   ucmpdi2.o lib1funcs.o div64.o sha1.o               \
 		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
+mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
+
 # the code in uaccess.S is not preemption safe and
 # probably faster on ARMv3 only
 ifeq ($(CONFIG_PREEMPT),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
 ifneq ($(CONFIG_CPU_32v3),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
-  lib-y	+= uaccess.o
+  mmu-y	+= uaccess.o
 endif
 endif
 
+lib-$(CONFIG_MMU)	+= $(mmu-y)
+
 ifeq ($(CONFIG_CPU_32v3),y)
   lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
 else
......
@@ -40,16 +40,25 @@ struct exception_table_entry
 
 extern int fixup_exception(struct pt_regs *regs);
 
+/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
 /*
  * Note that this is actually 0x1,0000,0000
  */
 #define KERNEL_DS	0x00000000
-#define USER_DS		TASK_SIZE
 #define get_ds()	(KERNEL_DS)
+
+#ifdef CONFIG_MMU
+#define USER_DS		TASK_SIZE
 #define get_fs()	(current_thread_info()->addr_limit)
 
-static inline void set_fs (mm_segment_t fs)
+static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
 		: "cc"); \
 	flag; })
 
-#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
-
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
  * fixup code, but there are a few places where it intrudes on the
  * main code path.  When we only write to user space, there is no
  * problem.
- *
- * The "__xxx" versions of the user access functions do not verify the
- * address space - it must have been done previously with a separate
- * "access_ok()" call.
- *
- * The "xxx_error" versions set the third argument to EFAULT if an
- * error occurs, and leave it unchanged on success.  Note that these
- * versions are void (ie, don't return a value as such).
  */
 
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_bad(void);
 
 #define __get_user_x(__r2,__p,__e,__s,__i...)	\
 	__asm__ __volatile__ (			\
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
 		__e;				\
 	})
 
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s)			\
+	__asm__ __volatile__ (				\
+		__asmeq("%0", "r0") __asmeq("%2", "r2")	\
+		"bl	__put_user_" #__s		\
+		: "=&r" (__e)				\
+		: "0" (__p), "r" (__r2)			\
+		: "ip", "lr", "cc")
+
+#define put_user(x,p)						\
+	({							\
+		const register typeof(*(p)) __r2 asm("r2") = (x);	\
+		const register typeof(*(p)) __user *__p asm("r0") = (p);\
+		register int __e asm("r0");			\
+		switch (sizeof(*(__p))) {			\
+		case 1:						\
+			__put_user_x(__r2, __p, __e, 1);	\
+			break;					\
+		case 2:						\
+			__put_user_x(__r2, __p, __e, 2);	\
+			break;					\
+		case 4:						\
+			__put_user_x(__r2, __p, __e, 4);	\
+			break;					\
+		case 8:						\
+			__put_user_x(__r2, __p, __e, 8);	\
+			break;					\
+		default: __e = __put_user_bad(); break;		\
+		}						\
+		__e;						\
+	})
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS			KERNEL_DS
+
+#define segment_eq(a,b)		(1)
+#define __addr_ok(addr)		(1)
+#define __range_ok(addr,size)	(0)
+#define get_fs()		(KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p)	__get_user(x,p)
+#define put_user(x,p)	__put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success.  Note that these
+ * versions are void (ie, don't return a value as such).
+ */
+
 #define __get_user(x,ptr)			\
 ({						\
 	long __gu_err = 0;			\
@@ -212,43 +277,6 @@ do { \
 		: "r" (addr), "i" (-EFAULT)	\
 		: "cc")
 
-extern int __put_user_1(void *, unsigned int);
-extern int __put_user_2(void *, unsigned int);
-extern int __put_user_4(void *, unsigned int);
-extern int __put_user_8(void *, unsigned long long);
-extern int __put_user_bad(void);
-
-#define __put_user_x(__r2,__p,__e,__s)			\
-	__asm__ __volatile__ (				\
-		__asmeq("%0", "r0") __asmeq("%2", "r2")	\
-		"bl	__put_user_" #__s		\
-		: "=&r" (__e)				\
-		: "0" (__p), "r" (__r2)			\
-		: "ip", "lr", "cc")
-
-#define put_user(x,p)						\
-	({							\
-		const register typeof(*(p)) __r2 asm("r2") = (x);	\
-		const register typeof(*(p)) __user *__p asm("r0") = (p);\
-		register int __e asm("r0");			\
-		switch (sizeof(*(__p))) {			\
-		case 1:						\
-			__put_user_x(__r2, __p, __e, 1);	\
-			break;					\
-		case 2:						\
-			__put_user_x(__r2, __p, __e, 2);	\
-			break;					\
-		case 4:						\
-			__put_user_x(__r2, __p, __e, 4);	\
-			break;					\
-		case 8:						\
-			__put_user_x(__r2, __p, __e, 8);	\
-			break;					\
-		default: __e = __put_user_bad(); break;		\
-		}						\
-		__e;						\
-	})
-
 #define __put_user(x,ptr)			\
 ({						\
 	long __pu_err = 0;			\
@@ -354,9 +382,16 @@ do { \
 		: "cc")
 
+#ifdef CONFIG_MMU
 extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
+#endif
+
 extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
 extern unsigned long __strnlen_user(const char __user *s, long n);
......
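
To round off the uaccess.h changes, here is a small standalone sketch (user-space C, not kernel code) of the permission-check side: with the nommu definition of __range_ok(addr,size) expanding to 0, access_ok() reduces to the constant expression ((0) == 0), so every range passes and the compiler can fold the check away.

#include <stdio.h>

/* nommu-style definitions mirroring the patch: the range check is a
 * compile-time constant, so access_ok() always evaluates to 1. */
#define __range_ok(addr,size)		(0)
#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)

int main(void)
{
	/* both checks expand to ((0) == 0), i.e. constant 1 */
	printf("access_ok(0x1000) = %d\n", access_ok(0, (void *)0x1000, 64));
	printf("access_ok(NULL)   = %d\n", access_ok(0, (void *)0, 4096));
	return 0;
}
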