Commit 5db6db0d authored by Linus Torvalds

Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This one
  mostly takes copy_from_user() and friends out of arch/* and gets the
  zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be a
  pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
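
The common thread in the per-architecture commits above is the RAW_COPY_USER contract: an architecture now supplies only raw_copy_from_user()/raw_copy_to_user(), each returning the number of bytes it could not copy, while the generic code performs the access_ok() check and zero-pads the tail of the kernel buffer when a read from userspace faults. A minimal sketch of the generic read-side wrapper (paraphrased, not verbatim; the real include/linux/uaccess.h of this cycle also carries might_fault()/KASAN hooks):

	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);	/* arch hook */
		if (unlikely(res))				/* partial or failed read: */
			memset(to + (n - res), 0, res);		/* zero-pad the tail */
		return res;					/* bytes NOT copied */
	}

This is why the "get rid of tail-zeroing in primitives" commits are safe: the padding moved into one place in generic code.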
#ifndef _ASM_EXTABLE_H
#define _ASM_EXTABLE_H

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *	lda	valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc)			\
({								\
	if ((_fixup)->fixup.bits.valreg != 31)			\
		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
	if ((_fixup)->fixup.bits.errreg != 31)			\
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (_fixup)->fixup.bits.nextinsn;			\
})

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)			\
	do {							\
		(a)->fixup.unit = (b)->fixup.unit;		\
		(b)->fixup.unit = (tmp).fixup.unit;		\
	} while (0)

#endif
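
To see how the new alpha <asm/extable.h> above gets used: on a fault, the handler searches the table for an entry whose pc-relative insn field points at the faulting instruction, applies the packed fixup, and resumes at pc + nextinsn. A standalone sketch of that recovery step (hypothetical helper names; the real logic lives in the alpha fault/trap handlers and maps register numbers through the saved pt_regs):

	/* Assumes <errno.h> for EFAULT; regs[] stands in for the saved
	 * integer register file, indexed 0..31. */
	static unsigned long
	apply_alpha_fixup(const struct exception_table_entry *e,
			  unsigned long fault_pc, unsigned long *regs)
	{
		/* insn is stored relative to the address of the entry itself */
		if ((unsigned long)&e->insn + e->insn != fault_pc)
			return 0;				/* not our entry */

		if (e->fixup.bits.valreg != 31)
			regs[e->fixup.bits.valreg] = 0;		/* zero the load target */
		if (e->fixup.bits.errreg != 31)
			regs[e->fixup.bits.errreg] = -EFAULT;	/* hand back the error */

		return fault_pc + e->fixup.bits.nextinsn;	/* where to resume */
	}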
...@@ -19,12 +19,8 @@ ...@@ -19,12 +19,8 @@
"3: .subsection 2\n" \ "3: .subsection 2\n" \
"4: br 1b\n" \ "4: br 1b\n" \
" .previous\n" \ " .previous\n" \
" .section __ex_table,\"a\"\n" \ EXC(1b,3b,%1,$31) \
" .long 1b-.\n" \ EXC(2b,3b,%1,$31) \
" lda $31,3b-1b(%1)\n" \
" .long 2b-.\n" \
" lda $31,3b-2b(%1)\n" \
" .previous\n" \
: "=&r" (oldval), "=&r"(ret) \ : "=&r" (oldval), "=&r"(ret) \
: "r" (uaddr), "r"(oparg) \ : "r" (uaddr), "r"(oparg) \
: "memory") : "memory")
...@@ -101,12 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -101,12 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"3: .subsection 2\n" "3: .subsection 2\n"
"4: br 1b\n" "4: br 1b\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" EXC(1b,3b,%0,$31)
" .long 1b-.\n" EXC(2b,3b,%0,$31)
" lda $31,3b-1b(%0)\n"
" .long 2b-.\n"
" lda $31,3b-2b(%0)\n"
" .previous\n"
: "+r"(ret), "=&r"(prev), "=&r"(cmp) : "+r"(ret), "=&r"(prev), "=&r"(cmp)
: "r"(uaddr), "r"((long)(int)oldval), "r"(newval) : "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
: "memory"); : "memory");
......
#ifndef __ALPHA_UACCESS_H #ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H #define __ALPHA_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with * performed or not. If get_fs() == USER_DS, checking is performed, with
...@@ -20,9 +16,6 @@ ...@@ -20,9 +16,6 @@
#define KERNEL_DS ((mm_segment_t) { 0UL }) #define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { -0x40000000000UL }) #define USER_DS ((mm_segment_t) { -0x40000000000UL })
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_fs() (current_thread_info()->addr_limit) #define get_fs() (current_thread_info()->addr_limit)
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x)) #define set_fs(x) (current_thread_info()->addr_limit = (x))
...@@ -39,13 +32,13 @@ ...@@ -39,13 +32,13 @@
* - AND "addr+size" doesn't have any high-bits set * - AND "addr+size" doesn't have any high-bits set
* - OR we are in kernel mode. * - OR we are in kernel mode.
*/ */
#define __access_ok(addr, size, segment) \ #define __access_ok(addr, size) \
(((segment).seg & (addr | size | (addr+size))) == 0) ((get_fs().seg & (addr | size | (addr+size))) == 0)
#define access_ok(type, addr, size) \ #define access_ok(type, addr, size) \
({ \ ({ \
__chk_user_ptr(addr); \ __chk_user_ptr(addr); \
__access_ok(((unsigned long)(addr)), (size), get_fs()); \ __access_ok(((unsigned long)(addr)), (size)); \
}) })
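
The mask trick above works because USER_DS.seg is the two's-complement of the user address-space size: the AND is zero only if addr, size and addr+size all stay below TASK_SIZE, and with KERNEL_DS (seg == 0) everything passes. A quick standalone illustration of the arithmetic, assuming 64-bit unsigned long as on alpha:

	#include <stdio.h>

	int main(void)
	{
		unsigned long seg = -0x40000000000UL;	/* USER_DS.seg, from above */
		unsigned long ok  = 0x123456789UL;	/* below the 2^42 user limit */
		unsigned long bad = 0x40000000008UL;	/* just past it */

		printf("%d\n", (seg & (ok  | 8 | (ok  + 8))) == 0);	/* prints 1 */
		printf("%d\n", (seg & (bad | 8 | (bad + 8))) == 0);	/* prints 0 */
		return 0;
	}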
/* /*
...@@ -61,9 +54,9 @@ ...@@ -61,9 +54,9 @@
* (b) require any knowledge of processes at this stage * (b) require any knowledge of processes at this stage
*/ */
#define put_user(x, ptr) \ #define put_user(x, ptr) \
__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs()) __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \ #define get_user(x, ptr) \
__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) __get_user_check((x), (ptr), sizeof(*(ptr)))
/* /*
* The "__xxx" versions do not do address space checking, useful when * The "__xxx" versions do not do address space checking, useful when
...@@ -81,6 +74,11 @@ ...@@ -81,6 +74,11 @@
* more extensive comments with fixup_inline_exception below for * more extensive comments with fixup_inline_exception below for
* more information. * more information.
*/ */
#define EXC(label,cont,res,err) \
".section __ex_table,\"a\"\n" \
" .long "#label"-.\n" \
" lda "#res","#cont"-"#label"("#err")\n" \
".previous\n"
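
For reference, each EXC() use expands to exactly the exception-table fragment that used to be open-coded at every site; e.g. EXC(1b,2b,%0,%1) emits:

	.section __ex_table,"a"
		.long 1b-.
		lda %0,2b-1b(%1)
	.previous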
extern void __get_user_unknown(void); extern void __get_user_unknown(void);
...@@ -100,12 +98,12 @@ extern void __get_user_unknown(void); ...@@ -100,12 +98,12 @@ extern void __get_user_unknown(void);
__gu_err; \ __gu_err; \
}) })
#define __get_user_check(x, ptr, size, segment) \ #define __get_user_check(x, ptr, size) \
({ \ ({ \
long __gu_err = -EFAULT; \ long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \ unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (__access_ok((unsigned long)__gu_addr, size, segment)) { \ if (__access_ok((unsigned long)__gu_addr, size)) { \
__gu_err = 0; \ __gu_err = 0; \
switch (size) { \ switch (size) { \
case 1: __get_user_8(__gu_addr); break; \ case 1: __get_user_8(__gu_addr); break; \
...@@ -125,20 +123,14 @@ struct __large_struct { unsigned long buf[100]; }; ...@@ -125,20 +123,14 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_64(addr) \ #define __get_user_64(addr) \
__asm__("1: ldq %0,%2\n" \ __asm__("1: ldq %0,%2\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,%0,%1) \
" .long 1b - .\n" \
" lda %0, 2b-1b(%1)\n" \
".previous" \
: "=r"(__gu_val), "=r"(__gu_err) \ : "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err)) : "m"(__m(addr)), "1"(__gu_err))
#define __get_user_32(addr) \ #define __get_user_32(addr) \
__asm__("1: ldl %0,%2\n" \ __asm__("1: ldl %0,%2\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,%0,%1) \
" .long 1b - .\n" \
" lda %0, 2b-1b(%1)\n" \
".previous" \
: "=r"(__gu_val), "=r"(__gu_err) \ : "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err)) : "m"(__m(addr)), "1"(__gu_err))
...@@ -148,20 +140,14 @@ struct __large_struct { unsigned long buf[100]; }; ...@@ -148,20 +140,14 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_16(addr) \ #define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \ __asm__("1: ldwu %0,%2\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,%0,%1) \
" .long 1b - .\n" \
" lda %0, 2b-1b(%1)\n" \
".previous" \
: "=r"(__gu_val), "=r"(__gu_err) \ : "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err)) : "m"(__m(addr)), "1"(__gu_err))
#define __get_user_8(addr) \ #define __get_user_8(addr) \
__asm__("1: ldbu %0,%2\n" \ __asm__("1: ldbu %0,%2\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,%0,%1) \
" .long 1b - .\n" \
" lda %0, 2b-1b(%1)\n" \
".previous" \
: "=r"(__gu_val), "=r"(__gu_err) \ : "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err)) : "m"(__m(addr)), "1"(__gu_err))
#else #else
...@@ -177,12 +163,8 @@ struct __large_struct { unsigned long buf[100]; }; ...@@ -177,12 +163,8 @@ struct __large_struct { unsigned long buf[100]; };
" extwh %1,%3,%1\n" \ " extwh %1,%3,%1\n" \
" or %0,%1,%0\n" \ " or %0,%1,%0\n" \
"3:\n" \ "3:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,3b,%0,%2) \
" .long 1b - .\n" \ EXC(2b,3b,%0,%2) \
" lda %0, 3b-1b(%2)\n" \
" .long 2b - .\n" \
" lda %0, 3b-2b(%2)\n" \
".previous" \
: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
: "r"(addr), "2"(__gu_err)); \ : "r"(addr), "2"(__gu_err)); \
} }
...@@ -191,10 +173,7 @@ struct __large_struct { unsigned long buf[100]; }; ...@@ -191,10 +173,7 @@ struct __large_struct { unsigned long buf[100]; };
__asm__("1: ldq_u %0,0(%2)\n" \ __asm__("1: ldq_u %0,0(%2)\n" \
" extbl %0,%2,%0\n" \ " extbl %0,%2,%0\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,%0,%1) \
" .long 1b - .\n" \
" lda %0, 2b-1b(%1)\n" \
".previous" \
: "=&r"(__gu_val), "=r"(__gu_err) \ : "=&r"(__gu_val), "=r"(__gu_err) \
: "r"(addr), "1"(__gu_err)) : "r"(addr), "1"(__gu_err))
#endif #endif
...@@ -215,11 +194,11 @@ extern void __put_user_unknown(void); ...@@ -215,11 +194,11 @@ extern void __put_user_unknown(void);
__pu_err; \ __pu_err; \
}) })
#define __put_user_check(x, ptr, size, segment) \ #define __put_user_check(x, ptr, size) \
({ \ ({ \
long __pu_err = -EFAULT; \ long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (__access_ok((unsigned long)__pu_addr, size, segment)) { \ if (__access_ok((unsigned long)__pu_addr, size)) { \
__pu_err = 0; \ __pu_err = 0; \
switch (size) { \ switch (size) { \
case 1: __put_user_8(x, __pu_addr); break; \ case 1: __put_user_8(x, __pu_addr); break; \
...@@ -240,20 +219,14 @@ extern void __put_user_unknown(void); ...@@ -240,20 +219,14 @@ extern void __put_user_unknown(void);
#define __put_user_64(x, addr) \ #define __put_user_64(x, addr) \
__asm__ __volatile__("1: stq %r2,%1\n" \ __asm__ __volatile__("1: stq %r2,%1\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,$31,%0) \
" .long 1b - .\n" \
" lda $31,2b-1b(%0)\n" \
".previous" \
: "=r"(__pu_err) \ : "=r"(__pu_err) \
: "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
#define __put_user_32(x, addr) \ #define __put_user_32(x, addr) \
__asm__ __volatile__("1: stl %r2,%1\n" \ __asm__ __volatile__("1: stl %r2,%1\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,$31,%0) \
" .long 1b - .\n" \
" lda $31,2b-1b(%0)\n" \
".previous" \
: "=r"(__pu_err) \ : "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
...@@ -263,20 +236,14 @@ __asm__ __volatile__("1: stl %r2,%1\n" \ ...@@ -263,20 +236,14 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
#define __put_user_16(x, addr) \ #define __put_user_16(x, addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \ __asm__ __volatile__("1: stw %r2,%1\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,$31,%0) \
" .long 1b - .\n" \
" lda $31,2b-1b(%0)\n" \
".previous" \
: "=r"(__pu_err) \ : "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#define __put_user_8(x, addr) \ #define __put_user_8(x, addr) \
__asm__ __volatile__("1: stb %r2,%1\n" \ __asm__ __volatile__("1: stb %r2,%1\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,$31,%0) \
" .long 1b - .\n" \
" lda $31,2b-1b(%0)\n" \
".previous" \
: "=r"(__pu_err) \ : "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else #else
...@@ -298,16 +265,10 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ ...@@ -298,16 +265,10 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
"3: stq_u %2,1(%5)\n" \ "3: stq_u %2,1(%5)\n" \
"4: stq_u %1,0(%5)\n" \ "4: stq_u %1,0(%5)\n" \
"5:\n" \ "5:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,5b,$31,%0) \
" .long 1b - .\n" \ EXC(2b,5b,$31,%0) \
" lda $31, 5b-1b(%0)\n" \ EXC(3b,5b,$31,%0) \
" .long 2b - .\n" \ EXC(4b,5b,$31,%0) \
" lda $31, 5b-2b(%0)\n" \
" .long 3b - .\n" \
" lda $31, 5b-3b(%0)\n" \
" .long 4b - .\n" \
" lda $31, 5b-4b(%0)\n" \
".previous" \
: "=r"(__pu_err), "=&r"(__pu_tmp1), \ : "=r"(__pu_err), "=&r"(__pu_tmp1), \
"=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
"=&r"(__pu_tmp4) \ "=&r"(__pu_tmp4) \
...@@ -324,12 +285,8 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ ...@@ -324,12 +285,8 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
" or %1,%2,%1\n" \ " or %1,%2,%1\n" \
"2: stq_u %1,0(%4)\n" \ "2: stq_u %1,0(%4)\n" \
"3:\n" \ "3:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,3b,$31,%0) \
" .long 1b - .\n" \ EXC(2b,3b,$31,%0) \
" lda $31, 3b-1b(%0)\n" \
" .long 2b - .\n" \
" lda $31, 3b-2b(%0)\n" \
".previous" \
: "=r"(__pu_err), \ : "=r"(__pu_err), \
"=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
...@@ -341,153 +298,37 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ ...@@ -341,153 +298,37 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
* Complex access routines * Complex access routines
*/ */
/* This little bit of silliness is to get the GP loaded for a function extern long __copy_user(void *to, const void *from, long len);
that ordinarily wouldn't. Otherwise we could have it done by the macro
directly, which can be optimized the linker. */
#ifdef MODULE
#define __module_address(sym) "r"(sym),
#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
#endif
extern void __copy_user(void);
extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
register void * __cu_to __asm__("$6") = to;
register const void * __cu_from __asm__("$7") = from;
register long __cu_len __asm__("$0") = len;
__asm__ __volatile__(
__module_call(28, 3, __copy_user)
: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
: __module_address(__copy_user)
"0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
return __cu_len;
}
#define __copy_to_user(to, from, n) \
({ \
__chk_user_ptr(to); \
__copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
})
#define __copy_from_user(to, from, n) \
({ \
__chk_user_ptr(from); \
__copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
})
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
extern inline long static inline unsigned long
copy_to_user(void __user *to, const void *from, long n) raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{ {
if (likely(__access_ok((unsigned long)to, n, get_fs()))) return __copy_user(to, (__force const void *)from, len);
n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
return n;
} }
extern inline long static inline unsigned long
copy_from_user(void *to, const void __user *from, long n) raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{ {
long res = n; return __copy_user((__force void *)to, from, len);
if (likely(__access_ok((unsigned long)from, n, get_fs())))
res = __copy_from_user_inatomic(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
} }
extern void __do_clear_user(void); extern long __clear_user(void __user *to, long len);
extern inline long
__clear_user(void __user *to, long len)
{
register void __user * __cl_to __asm__("$6") = to;
register long __cl_len __asm__("$0") = len;
__asm__ __volatile__(
__module_call(28, 2, __do_clear_user)
: "=r"(__cl_len), "=r"(__cl_to)
: __module_address(__do_clear_user)
"0"(__cl_len), "1"(__cl_to)
: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
return __cl_len;
}
extern inline long extern inline long
clear_user(void __user *to, long len) clear_user(void __user *to, long len)
{ {
if (__access_ok((unsigned long)to, len, get_fs())) if (__access_ok((unsigned long)to, len))
len = __clear_user(to, len); len = __clear_user(to, len);
return len; return len;
} }
#undef __module_address
#undef __module_call
#define user_addr_max() \ #define user_addr_max() \
(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) (uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dest, const char __user *src, long count); extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str); extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n); extern __must_check long strnlen_user(const char __user *str, long n);
/* #include <asm/extable.h>
* About the exception table:
*
* - insn is a 32-bit pc-relative offset from the faulting insn.
* - nextinsn is a 16-bit offset off of the faulting instruction
* (not off of the *next* instruction as branches are).
* - errreg is the register in which to place -EFAULT.
* - valreg is the final target register for the load sequence
* and will be zeroed.
*
* Either errreg or valreg may be $31, in which case nothing happens.
*
* The exception fixup information "just so happens" to be arranged
* as in a MEM format instruction. This lets us emit our three
* values like so:
*
* lda valreg, nextinsn(errreg)
*
*/
struct exception_table_entry
{
signed int insn;
union exception_fixup {
unsigned unit;
struct {
signed int nextinsn : 16;
unsigned int errreg : 5;
unsigned int valreg : 5;
} bits;
} fixup;
};
/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc) \
({ \
if ((_fixup)->fixup.bits.valreg != 31) \
map_reg((_fixup)->fixup.bits.valreg) = 0; \
if ((_fixup)->fixup.bits.errreg != 31) \
map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
(pc) + (_fixup)->fixup.bits.nextinsn; \
})
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup.unit = (b)->fixup.unit; \
(b)->fixup.unit = (tmp).fixup.unit; \
} while (0)
#endif /* __ALPHA_UACCESS_H */ #endif /* __ALPHA_UACCESS_H */
...@@ -482,12 +482,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, ...@@ -482,12 +482,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
" extwl %1,%3,%1\n" " extwl %1,%3,%1\n"
" extwh %2,%3,%2\n" " extwh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -502,12 +498,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, ...@@ -502,12 +498,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
" extll %1,%3,%1\n" " extll %1,%3,%1\n"
" extlh %2,%3,%2\n" " extlh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -522,12 +514,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, ...@@ -522,12 +514,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
" extql %1,%3,%1\n" " extql %1,%3,%1\n"
" extqh %2,%3,%2\n" " extqh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -551,16 +539,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, ...@@ -551,16 +539,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
"3: stq_u %2,1(%5)\n" "3: stq_u %2,1(%5)\n"
"4: stq_u %1,0(%5)\n" "4: stq_u %1,0(%5)\n"
"5:\n" "5:\n"
".section __ex_table,\"a\"\n" EXC(1b,5b,%2,%0)
" .long 1b - .\n" EXC(2b,5b,%1,%0)
" lda %2,5b-1b(%0)\n" EXC(3b,5b,$31,%0)
" .long 2b - .\n" EXC(4b,5b,$31,%0)
" lda %1,5b-2b(%0)\n"
" .long 3b - .\n"
" lda $31,5b-3b(%0)\n"
" .long 4b - .\n"
" lda $31,5b-4b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4) "=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0)); : "r"(va), "r"(una_reg(reg)), "0"(0));
...@@ -581,16 +563,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, ...@@ -581,16 +563,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
"3: stq_u %2,3(%5)\n" "3: stq_u %2,3(%5)\n"
"4: stq_u %1,0(%5)\n" "4: stq_u %1,0(%5)\n"
"5:\n" "5:\n"
".section __ex_table,\"a\"\n" EXC(1b,5b,%2,%0)
" .long 1b - .\n" EXC(2b,5b,%1,%0)
" lda %2,5b-1b(%0)\n" EXC(3b,5b,$31,%0)
" .long 2b - .\n" EXC(4b,5b,$31,%0)
" lda %1,5b-2b(%0)\n"
" .long 3b - .\n"
" lda $31,5b-3b(%0)\n"
" .long 4b - .\n"
" lda $31,5b-4b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4) "=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0)); : "r"(va), "r"(una_reg(reg)), "0"(0));
...@@ -611,16 +587,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, ...@@ -611,16 +587,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
"3: stq_u %2,7(%5)\n" "3: stq_u %2,7(%5)\n"
"4: stq_u %1,0(%5)\n" "4: stq_u %1,0(%5)\n"
"5:\n" "5:\n"
".section __ex_table,\"a\"\n\t" EXC(1b,5b,%2,%0)
" .long 1b - .\n" EXC(2b,5b,%1,%0)
" lda %2,5b-1b(%0)\n" EXC(3b,5b,$31,%0)
" .long 2b - .\n" EXC(4b,5b,$31,%0)
" lda %1,5b-2b(%0)\n"
" .long 3b - .\n"
" lda $31,5b-3b(%0)\n"
" .long 4b - .\n"
" lda $31,5b-4b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4) "=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0)); : "r"(va), "r"(una_reg(reg)), "0"(0));
...@@ -802,7 +772,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -802,7 +772,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
/* Don't bother reading ds in the access check since we already /* Don't bother reading ds in the access check since we already
know that this came from the user. Also rely on the fact that know that this came from the user. Also rely on the fact that
the page at TASK_SIZE is unmapped and so can't be touched anyway. */ the page at TASK_SIZE is unmapped and so can't be touched anyway. */
if (!__access_ok((unsigned long)va, 0, USER_DS)) if ((unsigned long)va >= TASK_SIZE)
goto give_sigsegv; goto give_sigsegv;
++unaligned[1].count; ++unaligned[1].count;
...@@ -835,12 +805,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -835,12 +805,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extwl %1,%3,%1\n" " extwl %1,%3,%1\n"
" extwh %2,%3,%2\n" " extwh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -855,12 +821,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -855,12 +821,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extll %1,%3,%1\n" " extll %1,%3,%1\n"
" extlh %2,%3,%2\n" " extlh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -875,12 +837,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -875,12 +837,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extql %1,%3,%1\n" " extql %1,%3,%1\n"
" extqh %2,%3,%2\n" " extqh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -895,12 +853,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -895,12 +853,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extll %1,%3,%1\n" " extll %1,%3,%1\n"
" extlh %2,%3,%2\n" " extlh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -915,12 +869,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -915,12 +869,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extql %1,%3,%1\n" " extql %1,%3,%1\n"
" extqh %2,%3,%2\n" " extqh %2,%3,%2\n"
"3:\n" "3:\n"
".section __ex_table,\"a\"\n" EXC(1b,3b,%1,%0)
" .long 1b - .\n" EXC(2b,3b,%2,%0)
" lda %1,3b-1b(%0)\n"
" .long 2b - .\n"
" lda %2,3b-2b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0)); : "r"(va), "0"(0));
if (error) if (error)
...@@ -944,16 +894,10 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -944,16 +894,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
"3: stq_u %2,1(%5)\n" "3: stq_u %2,1(%5)\n"
"4: stq_u %1,0(%5)\n" "4: stq_u %1,0(%5)\n"
"5:\n" "5:\n"
".section __ex_table,\"a\"\n" EXC(1b,5b,%2,%0)
" .long 1b - .\n" EXC(2b,5b,%1,%0)
" lda %2,5b-1b(%0)\n" EXC(3b,5b,$31,%0)
" .long 2b - .\n" EXC(4b,5b,$31,%0)
" lda %1,5b-2b(%0)\n"
" .long 3b - .\n"
" lda $31,5b-3b(%0)\n"
" .long 4b - .\n"
" lda $31,5b-4b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4) "=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(*reg_addr), "0"(0)); : "r"(va), "r"(*reg_addr), "0"(0));
...@@ -978,16 +922,10 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -978,16 +922,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
"3: stq_u %2,3(%5)\n" "3: stq_u %2,3(%5)\n"
"4: stq_u %1,0(%5)\n" "4: stq_u %1,0(%5)\n"
"5:\n" "5:\n"
".section __ex_table,\"a\"\n" EXC(1b,5b,%2,%0)
" .long 1b - .\n" EXC(2b,5b,%1,%0)
" lda %2,5b-1b(%0)\n" EXC(3b,5b,$31,%0)
" .long 2b - .\n" EXC(4b,5b,$31,%0)
" lda %1,5b-2b(%0)\n"
" .long 3b - .\n"
" lda $31,5b-3b(%0)\n"
" .long 4b - .\n"
" lda $31,5b-4b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4) "=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(*reg_addr), "0"(0)); : "r"(va), "r"(*reg_addr), "0"(0));
...@@ -1012,16 +950,10 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -1012,16 +950,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
"3: stq_u %2,7(%5)\n" "3: stq_u %2,7(%5)\n"
"4: stq_u %1,0(%5)\n" "4: stq_u %1,0(%5)\n"
"5:\n" "5:\n"
".section __ex_table,\"a\"\n\t" EXC(1b,5b,%2,%0)
" .long 1b - .\n" EXC(2b,5b,%1,%0)
" lda %2,5b-1b(%0)\n" EXC(3b,5b,$31,%0)
" .long 2b - .\n" EXC(4b,5b,$31,%0)
" lda %1,5b-2b(%0)\n"
" .long 3b - .\n"
" lda $31,5b-3b(%0)\n"
" .long 4b - .\n"
" lda $31,5b-4b(%0)\n"
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4) "=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(*reg_addr), "0"(0)); : "r"(va), "r"(*reg_addr), "0"(0));
...@@ -1047,7 +979,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, ...@@ -1047,7 +979,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
/* We need to replicate some of the logic in mm/fault.c, /* We need to replicate some of the logic in mm/fault.c,
since we don't have access to the fault code in the since we don't have access to the fault code in the
exception handling return path. */ exception handling return path. */
if (!__access_ok((unsigned long)va, 0, USER_DS)) if ((unsigned long)va >= TASK_SIZE)
info.si_code = SEGV_ACCERR; info.si_code = SEGV_ACCERR;
else { else {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
......
...@@ -8,21 +8,6 @@ ...@@ -8,21 +8,6 @@
* right "bytes left to zero" value (and that it is updated only _after_ * right "bytes left to zero" value (and that it is updated only _after_
* a successful copy). There is also some rather minor exception setup * a successful copy). There is also some rather minor exception setup
* stuff. * stuff.
*
* NOTE! This is not directly C-callable, because the calling semantics
* are different:
*
* Inputs:
* length in $0
* destination address in $6
* exception pointer in $7
* return address in $28 (exceptions expect it there)
*
* Outputs:
* bytes left to copy in $0
*
* Clobbers:
* $1,$2,$3,$4,$5,$6
*/ */
#include <asm/export.h> #include <asm/export.h>
...@@ -38,62 +23,63 @@ ...@@ -38,62 +23,63 @@
.set noreorder .set noreorder
.align 4 .align 4
.globl __do_clear_user .globl __clear_user
.ent __do_clear_user .ent __clear_user
.frame $30, 0, $28 .frame $30, 0, $26
.prologue 0 .prologue 0
$loop: $loop:
and $1, 3, $4 # e0 : and $1, 3, $4 # e0 :
beq $4, 1f # .. e1 : beq $4, 1f # .. e1 :
0: EX( stq_u $31, 0($6) ) # e0 : zero one word 0: EX( stq_u $31, 0($16) ) # e0 : zero one word
subq $0, 8, $0 # .. e1 : subq $0, 8, $0 # .. e1 :
subq $4, 1, $4 # e0 : subq $4, 1, $4 # e0 :
addq $6, 8, $6 # .. e1 : addq $16, 8, $16 # .. e1 :
bne $4, 0b # e1 : bne $4, 0b # e1 :
unop # : unop # :
1: bic $1, 3, $1 # e0 : 1: bic $1, 3, $1 # e0 :
beq $1, $tail # .. e1 : beq $1, $tail # .. e1 :
2: EX( stq_u $31, 0($6) ) # e0 : zero four words 2: EX( stq_u $31, 0($16) ) # e0 : zero four words
subq $0, 8, $0 # .. e1 : subq $0, 8, $0 # .. e1 :
EX( stq_u $31, 8($6) ) # e0 : EX( stq_u $31, 8($16) ) # e0 :
subq $0, 8, $0 # .. e1 : subq $0, 8, $0 # .. e1 :
EX( stq_u $31, 16($6) ) # e0 : EX( stq_u $31, 16($16) ) # e0 :
subq $0, 8, $0 # .. e1 : subq $0, 8, $0 # .. e1 :
EX( stq_u $31, 24($6) ) # e0 : EX( stq_u $31, 24($16) ) # e0 :
subq $0, 8, $0 # .. e1 : subq $0, 8, $0 # .. e1 :
subq $1, 4, $1 # e0 : subq $1, 4, $1 # e0 :
addq $6, 32, $6 # .. e1 : addq $16, 32, $16 # .. e1 :
bne $1, 2b # e1 : bne $1, 2b # e1 :
$tail: $tail:
bne $2, 1f # e1 : is there a tail to do? bne $2, 1f # e1 : is there a tail to do?
ret $31, ($28), 1 # .. e1 : ret $31, ($26), 1 # .. e1 :
1: EX( ldq_u $5, 0($6) ) # e0 : 1: EX( ldq_u $5, 0($16) ) # e0 :
clr $0 # .. e1 : clr $0 # .. e1 :
nop # e1 : nop # e1 :
mskqh $5, $0, $5 # e0 : mskqh $5, $0, $5 # e0 :
EX( stq_u $5, 0($6) ) # e0 : EX( stq_u $5, 0($16) ) # e0 :
ret $31, ($28), 1 # .. e1 : ret $31, ($26), 1 # .. e1 :
__do_clear_user: __clear_user:
and $6, 7, $4 # e0 : find dest misalignment and $17, $17, $0
and $16, 7, $4 # e0 : find dest misalignment
beq $0, $zerolength # .. e1 : beq $0, $zerolength # .. e1 :
addq $0, $4, $1 # e0 : bias counter addq $0, $4, $1 # e0 : bias counter
and $1, 7, $2 # e1 : number of bytes in tail and $1, 7, $2 # e1 : number of bytes in tail
srl $1, 3, $1 # e0 : srl $1, 3, $1 # e0 :
beq $4, $loop # .. e1 : beq $4, $loop # .. e1 :
EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in
beq $1, $oneword # .. e1 : sub-word store? beq $1, $oneword # .. e1 : sub-word store?
mskql $5, $6, $5 # e0 : take care of misaligned head mskql $5, $16, $5 # e0 : take care of misaligned head
addq $6, 8, $6 # .. e1 : addq $16, 8, $16 # .. e1 :
EX( stq_u $5, -8($6) ) # e0 : EX( stq_u $5, -8($16) ) # e0 :
addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
subq $1, 1, $1 # e0 : subq $1, 1, $1 # e0 :
subq $0, 8, $0 # .. e1 : subq $0, 8, $0 # .. e1 :
...@@ -101,15 +87,15 @@ __do_clear_user: ...@@ -101,15 +87,15 @@ __do_clear_user:
unop # : unop # :
$oneword: $oneword:
mskql $5, $6, $4 # e0 : mskql $5, $16, $4 # e0 :
mskqh $5, $2, $5 # e0 : mskqh $5, $2, $5 # e0 :
or $5, $4, $5 # e1 : or $5, $4, $5 # e1 :
EX( stq_u $5, 0($6) ) # e0 : EX( stq_u $5, 0($16) ) # e0 :
clr $0 # .. e1 : clr $0 # .. e1 :
$zerolength: $zerolength:
$exception: $exception:
ret $31, ($28), 1 # .. e1 : ret $31, ($26), 1 # .. e1 :
.end __do_clear_user .end __clear_user
EXPORT_SYMBOL(__do_clear_user) EXPORT_SYMBOL(__clear_user)
...@@ -9,21 +9,6 @@ ...@@ -9,21 +9,6 @@
* contains the right "bytes left to copy" value (and that it is updated * contains the right "bytes left to copy" value (and that it is updated
* only _after_ a successful copy). There is also some rather minor * only _after_ a successful copy). There is also some rather minor
* exception setup stuff.. * exception setup stuff..
*
* NOTE! This is not directly C-callable, because the calling semantics are
* different:
*
* Inputs:
* length in $0
* destination address in $6
* source address in $7
* return address in $28
*
* Outputs:
* bytes left to copy in $0
*
* Clobbers:
* $1,$2,$3,$4,$5,$6,$7
*/ */
#include <asm/export.h> #include <asm/export.h>
...@@ -49,58 +34,59 @@ ...@@ -49,58 +34,59 @@
.ent __copy_user .ent __copy_user
__copy_user: __copy_user:
.prologue 0 .prologue 0
and $6,7,$3 and $18,$18,$0
and $16,7,$3
beq $0,$35 beq $0,$35
beq $3,$36 beq $3,$36
subq $3,8,$3 subq $3,8,$3
.align 4 .align 4
$37: $37:
EXI( ldq_u $1,0($7) ) EXI( ldq_u $1,0($17) )
EXO( ldq_u $2,0($6) ) EXO( ldq_u $2,0($16) )
extbl $1,$7,$1 extbl $1,$17,$1
mskbl $2,$6,$2 mskbl $2,$16,$2
insbl $1,$6,$1 insbl $1,$16,$1
addq $3,1,$3 addq $3,1,$3
bis $1,$2,$1 bis $1,$2,$1
EXO( stq_u $1,0($6) ) EXO( stq_u $1,0($16) )
subq $0,1,$0 subq $0,1,$0
addq $6,1,$6 addq $16,1,$16
addq $7,1,$7 addq $17,1,$17
beq $0,$41 beq $0,$41
bne $3,$37 bne $3,$37
$36: $36:
and $7,7,$1 and $17,7,$1
bic $0,7,$4 bic $0,7,$4
beq $1,$43 beq $1,$43
beq $4,$48 beq $4,$48
EXI( ldq_u $3,0($7) ) EXI( ldq_u $3,0($17) )
.align 4 .align 4
$50: $50:
EXI( ldq_u $2,8($7) ) EXI( ldq_u $2,8($17) )
subq $4,8,$4 subq $4,8,$4
extql $3,$7,$3 extql $3,$17,$3
extqh $2,$7,$1 extqh $2,$17,$1
bis $3,$1,$1 bis $3,$1,$1
EXO( stq $1,0($6) ) EXO( stq $1,0($16) )
addq $7,8,$7 addq $17,8,$17
subq $0,8,$0 subq $0,8,$0
addq $6,8,$6 addq $16,8,$16
bis $2,$2,$3 bis $2,$2,$3
bne $4,$50 bne $4,$50
$48: $48:
beq $0,$41 beq $0,$41
.align 4 .align 4
$57: $57:
EXI( ldq_u $1,0($7) ) EXI( ldq_u $1,0($17) )
EXO( ldq_u $2,0($6) ) EXO( ldq_u $2,0($16) )
extbl $1,$7,$1 extbl $1,$17,$1
mskbl $2,$6,$2 mskbl $2,$16,$2
insbl $1,$6,$1 insbl $1,$16,$1
bis $1,$2,$1 bis $1,$2,$1
EXO( stq_u $1,0($6) ) EXO( stq_u $1,0($16) )
subq $0,1,$0 subq $0,1,$0
addq $6,1,$6 addq $16,1,$16
addq $7,1,$7 addq $17,1,$17
bne $0,$57 bne $0,$57
br $31,$41 br $31,$41
.align 4 .align 4
...@@ -108,27 +94,27 @@ $43: ...@@ -108,27 +94,27 @@ $43:
beq $4,$65 beq $4,$65
.align 4 .align 4
$66: $66:
EXI( ldq $1,0($7) ) EXI( ldq $1,0($17) )
subq $4,8,$4 subq $4,8,$4
EXO( stq $1,0($6) ) EXO( stq $1,0($16) )
addq $7,8,$7 addq $17,8,$17
subq $0,8,$0 subq $0,8,$0
addq $6,8,$6 addq $16,8,$16
bne $4,$66 bne $4,$66
$65: $65:
beq $0,$41 beq $0,$41
EXI( ldq $2,0($7) ) EXI( ldq $2,0($17) )
EXO( ldq $1,0($6) ) EXO( ldq $1,0($16) )
mskql $2,$0,$2 mskql $2,$0,$2
mskqh $1,$0,$1 mskqh $1,$0,$1
bis $2,$1,$2 bis $2,$1,$2
EXO( stq $2,0($6) ) EXO( stq $2,0($16) )
bis $31,$31,$0 bis $31,$31,$0
$41: $41:
$35: $35:
$exitin: $exitin:
$exitout: $exitout:
ret $31,($28),1 ret $31,($26),1
.end __copy_user .end __copy_user
EXPORT_SYMBOL(__copy_user) EXPORT_SYMBOL(__copy_user)
...@@ -45,10 +45,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) ...@@ -45,10 +45,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: ldq_u %0,%2\n" \ "1: ldq_u %0,%2\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,%0,%1) \
" .long 1b - .\n" \
" lda %0,2b-1b(%1)\n" \
".previous" \
: "=r"(x), "=r"(__guu_err) \ : "=r"(x), "=r"(__guu_err) \
: "m"(__m(ptr)), "1"(0)); \ : "m"(__m(ptr)), "1"(0)); \
__guu_err; \ __guu_err; \
...@@ -60,10 +57,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) ...@@ -60,10 +57,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: stq_u %2,%1\n" \ "1: stq_u %2,%1\n" \
"2:\n" \ "2:\n" \
".section __ex_table,\"a\"\n" \ EXC(1b,2b,$31,%0) \
" .long 1b - ." \
" lda $31,2b-1b(%0)\n" \
".previous" \
: "=r"(__puu_err) \ : "=r"(__puu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(0)); \ : "m"(__m(addr)), "rJ"(x), "0"(0)); \
__puu_err; \ __puu_err; \
......
...@@ -9,21 +9,6 @@ ...@@ -9,21 +9,6 @@
* a successful copy). There is also some rather minor exception setup * a successful copy). There is also some rather minor exception setup
* stuff. * stuff.
* *
* NOTE! This is not directly C-callable, because the calling semantics
* are different:
*
* Inputs:
* length in $0
* destination address in $6
* exception pointer in $7
* return address in $28 (exceptions expect it there)
*
* Outputs:
* bytes left to copy in $0
*
* Clobbers:
* $1,$2,$3,$4,$5,$6
*
* Much of the information about 21264 scheduling/coding comes from: * Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264 * Compiler Writer's Guide for the Alpha 21264
* abbreviated as 'CWG' in other comments here * abbreviated as 'CWG' in other comments here
...@@ -56,14 +41,15 @@ ...@@ -56,14 +41,15 @@
.set noreorder .set noreorder
.align 4 .align 4
.globl __do_clear_user .globl __clear_user
.ent __do_clear_user .ent __clear_user
.frame $30, 0, $28 .frame $30, 0, $26
.prologue 0 .prologue 0
# Pipeline info : Slotting & Comments # Pipeline info : Slotting & Comments
__do_clear_user: __clear_user:
and $6, 7, $4 # .. E .. .. : find dest head misalignment and $17, $17, $0
and $16, 7, $4 # .. E .. .. : find dest head misalignment
beq $0, $zerolength # U .. .. .. : U L U L beq $0, $zerolength # U .. .. .. : U L U L
addq $0, $4, $1 # .. .. .. E : bias counter addq $0, $4, $1 # .. .. .. E : bias counter
...@@ -75,14 +61,14 @@ __do_clear_user: ...@@ -75,14 +61,14 @@ __do_clear_user:
/* /*
* Head is not aligned. Write (8 - $4) bytes to head of destination * Head is not aligned. Write (8 - $4) bytes to head of destination
* This means $6 is known to be misaligned * This means $16 is known to be misaligned
*/ */
EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in
beq $1, $onebyte # .. .. U .. : sub-word store? beq $1, $onebyte # .. .. U .. : sub-word store?
mskql $5, $6, $5 # .. U .. .. : take care of misaligned head mskql $5, $16, $5 # .. U .. .. : take care of misaligned head
addq $6, 8, $6 # E .. .. .. : L U U L addq $16, 8, $16 # E .. .. .. : L U U L
EX( stq_u $5, -8($6) ) # .. .. .. L : EX( stq_u $5, -8($16) ) # .. .. .. L :
subq $1, 1, $1 # .. .. E .. : subq $1, 1, $1 # .. .. E .. :
addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment
subq $0, 8, $0 # E .. .. .. : U L U L subq $0, 8, $0 # E .. .. .. : U L U L
...@@ -93,11 +79,11 @@ __do_clear_user: ...@@ -93,11 +79,11 @@ __do_clear_user:
* values upon initial entry to the loop * values upon initial entry to the loop
* $1 is number of quadwords to clear (zero is a valid value) * $1 is number of quadwords to clear (zero is a valid value)
* $2 is number of trailing bytes (0..7) ($2 never used...) * $2 is number of trailing bytes (0..7) ($2 never used...)
* $6 is known to be aligned 0mod8 * $16 is known to be aligned 0mod8
*/ */
$headalign: $headalign:
subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop
and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop
subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop)
blt $4, $trailquad # U .. .. .. : U L U L blt $4, $trailquad # U .. .. .. : U L U L
...@@ -114,21 +100,21 @@ $headalign: ...@@ -114,21 +100,21 @@ $headalign:
beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64
$alignmod64: $alignmod64:
EX( stq_u $31, 0($6) ) # .. .. .. L EX( stq_u $31, 0($16) ) # .. .. .. L
addq $3, 8, $3 # .. .. E .. addq $3, 8, $3 # .. .. E ..
subq $0, 8, $0 # .. E .. .. subq $0, 8, $0 # .. E .. ..
nop # E .. .. .. : U L U L nop # E .. .. .. : U L U L
nop # .. .. .. E nop # .. .. .. E
subq $1, 1, $1 # .. .. E .. subq $1, 1, $1 # .. .. E ..
addq $6, 8, $6 # .. E .. .. addq $16, 8, $16 # .. E .. ..
blt $3, $alignmod64 # U .. .. .. : U L U L blt $3, $alignmod64 # U .. .. .. : U L U L
$bigalign: $bigalign:
/* /*
* $0 is the number of bytes left * $0 is the number of bytes left
* $1 is the number of quads left * $1 is the number of quads left
* $6 is aligned 0mod64 * $16 is aligned 0mod64
* we know that we'll be taking a minimum of one trip through * we know that we'll be taking a minimum of one trip through
* CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
* We are _not_ going to update $0 after every single store. That * We are _not_ going to update $0 after every single store. That
...@@ -145,39 +131,39 @@ $bigalign: ...@@ -145,39 +131,39 @@ $bigalign:
nop # E : nop # E :
nop # E : nop # E :
nop # E : nop # E :
bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest
/* This might actually help for the current trip... */ /* This might actually help for the current trip... */
$do_wh64: $do_wh64:
wh64 ($3) # .. .. .. L1 : memory subsystem hint wh64 ($3) # .. .. .. L1 : memory subsystem hint
subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop?
EX( stq_u $31, 0($6) ) # .. L .. .. EX( stq_u $31, 0($16) ) # .. L .. ..
subq $0, 8, $0 # E .. .. .. : U L U L subq $0, 8, $0 # E .. .. .. : U L U L
addq $6, 128, $3 # E : Target address of wh64 addq $16, 128, $3 # E : Target address of wh64
EX( stq_u $31, 8($6) ) # L : EX( stq_u $31, 8($16) ) # L :
EX( stq_u $31, 16($6) ) # L : EX( stq_u $31, 16($16) ) # L :
subq $0, 16, $0 # E : U L L U subq $0, 16, $0 # E : U L L U
nop # E : nop # E :
EX( stq_u $31, 24($6) ) # L : EX( stq_u $31, 24($16) ) # L :
EX( stq_u $31, 32($6) ) # L : EX( stq_u $31, 32($16) ) # L :
subq $0, 168, $5 # E : U L L U : two trips through the loop left? subq $0, 168, $5 # E : U L L U : two trips through the loop left?
/* 168 = 192 - 24, since we've already completed some stores */ /* 168 = 192 - 24, since we've already completed some stores */
subq $0, 16, $0 # E : subq $0, 16, $0 # E :
EX( stq_u $31, 40($6) ) # L : EX( stq_u $31, 40($16) ) # L :
EX( stq_u $31, 48($6) ) # L : EX( stq_u $31, 48($16) ) # L :
cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle
subq $1, 8, $1 # E : subq $1, 8, $1 # E :
subq $0, 16, $0 # E : subq $0, 16, $0 # E :
EX( stq_u $31, 56($6) ) # L : EX( stq_u $31, 56($16) ) # L :
nop # E : U L U L nop # E : U L U L
nop # E : nop # E :
subq $0, 8, $0 # E : subq $0, 8, $0 # E :
addq $6, 64, $6 # E : addq $16, 64, $16 # E :
bge $4, $do_wh64 # U : U L U L bge $4, $do_wh64 # U : U L U L
$trailquad: $trailquad:
...@@ -190,14 +176,14 @@ $trailquad: ...@@ -190,14 +176,14 @@ $trailquad:
beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go
$onequad: $onequad:
EX( stq_u $31, 0($6) ) # .. .. .. L EX( stq_u $31, 0($16) ) # .. .. .. L
subq $1, 1, $1 # .. .. E .. subq $1, 1, $1 # .. .. E ..
subq $0, 8, $0 # .. E .. .. subq $0, 8, $0 # .. E .. ..
nop # E .. .. .. : U L U L nop # E .. .. .. : U L U L
nop # .. .. .. E nop # .. .. .. E
nop # .. .. E .. nop # .. .. E ..
addq $6, 8, $6 # .. E .. .. addq $16, 8, $16 # .. E .. ..
bgt $1, $onequad # U .. .. .. : U L U L bgt $1, $onequad # U .. .. .. : U L U L
# We have an unknown number of bytes left to go. # We have an unknown number of bytes left to go.
...@@ -211,9 +197,9 @@ $trailbytes: ...@@ -211,9 +197,9 @@ $trailbytes:
# so we will use $0 as the loop counter # so we will use $0 as the loop counter
# We know for a fact that $0 > 0 zero due to previous context # We know for a fact that $0 > 0 zero due to previous context
$onebyte: $onebyte:
EX( stb $31, 0($6) ) # .. .. .. L EX( stb $31, 0($16) ) # .. .. .. L
subq $0, 1, $0 # .. .. E .. : subq $0, 1, $0 # .. .. E .. :
addq $6, 1, $6 # .. E .. .. : addq $16, 1, $16 # .. E .. .. :
bgt $0, $onebyte # U .. .. .. : U L U L bgt $0, $onebyte # U .. .. .. : U L U L
$zerolength: $zerolength:
...@@ -221,6 +207,6 @@ $exception: # Destination for exception recovery(?) ...@@ -221,6 +207,6 @@ $exception: # Destination for exception recovery(?)
nop # .. .. .. E : nop # .. .. .. E :
nop # .. .. E .. : nop # .. .. E .. :
nop # .. E .. .. : nop # .. E .. .. :
ret $31, ($28), 1 # L0 .. .. .. : L U L U ret $31, ($26), 1 # L0 .. .. .. : L U L U
.end __do_clear_user .end __clear_user
EXPORT_SYMBOL(__do_clear_user) EXPORT_SYMBOL(__clear_user)
...@@ -12,21 +12,6 @@ ...@@ -12,21 +12,6 @@
* only _after_ a successful copy). There is also some rather minor * only _after_ a successful copy). There is also some rather minor
* exception setup stuff.. * exception setup stuff..
* *
* NOTE! This is not directly C-callable, because the calling semantics are
* different:
*
* Inputs:
* length in $0
* destination address in $6
* source address in $7
* return address in $28
*
* Outputs:
* bytes left to copy in $0
*
* Clobbers:
* $1,$2,$3,$4,$5,$6,$7
*
* Much of the information about 21264 scheduling/coding comes from: * Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264 * Compiler Writer's Guide for the Alpha 21264
* abbreviated as 'CWG' in other comments here * abbreviated as 'CWG' in other comments here
...@@ -60,10 +45,11 @@ ...@@ -60,10 +45,11 @@
# Pipeline info: Slotting & Comments # Pipeline info: Slotting & Comments
__copy_user: __copy_user:
.prologue 0 .prologue 0
subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy? andq $18, $18, $0
subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy?
beq $0, $zerolength # U .. .. .. : U L U L beq $0, $zerolength # U .. .. .. : U L U L
and $6,7,$3 # .. .. .. E : is leading dest misalignment and $16,7,$3 # .. .. .. E : is leading dest misalignment
ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data
beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall)
subq $3, 8, $3 # E .. .. .. : L U U L : trip counter subq $3, 8, $3 # E .. .. .. : L U U L : trip counter
...@@ -73,17 +59,17 @@ __copy_user: ...@@ -73,17 +59,17 @@ __copy_user:
* We know we have at least one trip through this loop * We know we have at least one trip through this loop
*/ */
$aligndest: $aligndest:
EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores
addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG
addq $3,1,$3 # .. E .. .. : addq $3,1,$3 # .. E .. .. :
nop # E .. .. .. : U L U L nop # E .. .. .. : U L U L
/* /*
* the -1 is to compensate for the inc($6) done in a previous quadpack * the -1 is to compensate for the inc($16) done in a previous quadpack
* which allows us zero dependencies within either quadpack in the loop * which allows us zero dependencies within either quadpack in the loop
*/ */
EXO( stb $1,-1($6) ) # .. .. .. L : EXO( stb $1,-1($16) ) # .. .. .. L :
addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG
subq $0,1,$0 # .. E .. .. : subq $0,1,$0 # .. E .. .. :
bne $3, $aligndest # U .. .. .. : U L U L bne $3, $aligndest # U .. .. .. : U L U L
...@@ -92,29 +78,29 @@ $aligndest: ...@@ -92,29 +78,29 @@ $aligndest:
* If we arrived via branch, we have a minimum of 32 bytes * If we arrived via branch, we have a minimum of 32 bytes
*/ */
$destaligned: $destaligned:
and $7,7,$1 # .. .. .. E : Check _current_ source alignment and $17,7,$1 # .. .. .. E : Check _current_ source alignment
bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop
EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code
beq $1,$quadaligned # U .. .. .. : U L U L beq $1,$quadaligned # U .. .. .. : U L U L
/* /*
* In the worst case, we've just executed an ldq_u here from 0($7) * In the worst case, we've just executed an ldq_u here from 0($17)
* and we'll repeat it once if we take the branch * and we'll repeat it once if we take the branch
*/ */
/* Misaligned quadword loop - not unrolled. Leave it that way. */ /* Misaligned quadword loop - not unrolled. Leave it that way. */
$misquad: $misquad:
EXI( ldq_u $2,8($7) ) # .. .. .. L : EXI( ldq_u $2,8($17) ) # .. .. .. L :
subq $4,8,$4 # .. .. E .. : subq $4,8,$4 # .. .. E .. :
extql $3,$7,$3 # .. U .. .. : extql $3,$17,$3 # .. U .. .. :
extqh $2,$7,$1 # U .. .. .. : U U L L extqh $2,$17,$1 # U .. .. .. : U U L L
bis $3,$1,$1 # .. .. .. E : bis $3,$1,$1 # .. .. .. E :
EXO( stq $1,0($6) ) # .. .. L .. : EXO( stq $1,0($16) ) # .. .. L .. :
addq $7,8,$7 # .. E .. .. : addq $17,8,$17 # .. E .. .. :
subq $0,8,$0 # E .. .. .. : U L L U subq $0,8,$0 # E .. .. .. : U L L U
addq $6,8,$6 # .. .. .. E : addq $16,8,$16 # .. .. .. E :
bis $2,$2,$3 # .. .. E .. : bis $2,$2,$3 # .. .. E .. :
nop # .. E .. .. : nop # .. E .. .. :
bne $4,$misquad # U .. .. .. : U L U L bne $4,$misquad # U .. .. .. : U L U L
...@@ -125,8 +111,8 @@ $misquad: ...@@ -125,8 +111,8 @@ $misquad:
beq $0,$zerolength # U .. .. .. : U L U L beq $0,$zerolength # U .. .. .. : U L U L
/* We know we have at least one trip through the byte loop */ /* We know we have at least one trip through the byte loop */
EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
nop # .. E .. .. : nop # .. E .. .. :
br $31, $dirtyentry # L0 .. .. .. : L U U L br $31, $dirtyentry # L0 .. .. .. : L U U L
/* Do the trailing byte loop load, then hop into the store part of the loop */ /* Do the trailing byte loop load, then hop into the store part of the loop */
...@@ -136,8 +122,8 @@ $misquad: ...@@ -136,8 +122,8 @@ $misquad:
* Based upon the usage context, it's worth the effort to unroll this loop * Based upon the usage context, it's worth the effort to unroll this loop
* $0 - number of bytes to be moved * $0 - number of bytes to be moved
* $4 - number of bytes to move as quadwords * $4 - number of bytes to move as quadwords
* $6 is current destination address * $16 is current destination address
* $7 is current source address * $17 is current source address
*/ */
$quadaligned: $quadaligned:
subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff
...@@ -155,29 +141,29 @@ $quadaligned: ...@@ -155,29 +141,29 @@ $quadaligned:
* instruction memory hint instruction). * instruction memory hint instruction).
*/ */
$unroll4: $unroll4:
EXI( ldq $1,0($7) ) # .. .. .. L EXI( ldq $1,0($17) ) # .. .. .. L
EXI( ldq $2,8($7) ) # .. .. L .. EXI( ldq $2,8($17) ) # .. .. L ..
subq $4,32,$4 # .. E .. .. subq $4,32,$4 # .. E .. ..
nop # E .. .. .. : U U L L nop # E .. .. .. : U U L L
addq $7,16,$7 # .. .. .. E addq $17,16,$17 # .. .. .. E
EXO( stq $1,0($6) ) # .. .. L .. EXO( stq $1,0($16) ) # .. .. L ..
EXO( stq $2,8($6) ) # .. L .. .. EXO( stq $2,8($16) ) # .. L .. ..
subq $0,16,$0 # E .. .. .. : U L L U subq $0,16,$0 # E .. .. .. : U L L U
addq $6,16,$6 # .. .. .. E addq $16,16,$16 # .. .. .. E
EXI( ldq $1,0($7) ) # .. .. L .. EXI( ldq $1,0($17) ) # .. .. L ..
EXI( ldq $2,8($7) ) # .. L .. .. EXI( ldq $2,8($17) ) # .. L .. ..
subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip?
EXO( stq $1,0($6) ) # .. .. .. L EXO( stq $1,0($16) ) # .. .. .. L
EXO( stq $2,8($6) ) # .. .. L .. EXO( stq $2,8($16) ) # .. .. L ..
subq $0,16,$0 # .. E .. .. subq $0,16,$0 # .. E .. ..
addq $7,16,$7 # E .. .. .. : U L L U addq $17,16,$17 # E .. .. .. : U L L U
nop # .. .. .. E nop # .. .. .. E
nop # .. .. E .. nop # .. .. E ..
addq $6,16,$6 # .. E .. .. addq $16,16,$16 # .. E .. ..
bgt $3,$unroll4 # U .. .. .. : U L U L bgt $3,$unroll4 # U .. .. .. : U L U L
nop nop
...@@ -186,14 +172,14 @@ $unroll4: ...@@ -186,14 +172,14 @@ $unroll4:
beq $4, $noquads beq $4, $noquads
$onequad: $onequad:
EXI( ldq $1,0($7) ) EXI( ldq $1,0($17) )
subq $4,8,$4 subq $4,8,$4
addq $7,8,$7 addq $17,8,$17
nop nop
EXO( stq $1,0($6) ) EXO( stq $1,0($16) )
subq $0,8,$0 subq $0,8,$0
addq $6,8,$6 addq $16,8,$16
bne $4,$onequad bne $4,$onequad
$noquads: $noquads:
...@@ -207,23 +193,23 @@ $noquads: ...@@ -207,23 +193,23 @@ $noquads:
* There's no point in doing a lot of complex alignment calculations to try to * There's no point in doing a lot of complex alignment calculations to try to
* to quadword stuff for a small amount of data. * to quadword stuff for a small amount of data.
* $0 - remaining number of bytes left to copy * $0 - remaining number of bytes left to copy
* $6 - current dest addr * $16 - current dest addr
* $7 - current source addr * $17 - current source addr
*/ */
$onebyteloop: $onebyteloop:
EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
nop # .. E .. .. : nop # .. E .. .. :
nop # E .. .. .. : U L U L nop # E .. .. .. : U L U L
$dirtyentry: $dirtyentry:
/* /*
* the -1 is to compensate for the inc($6) done in a previous quadpack * the -1 is to compensate for the inc($16) done in a previous quadpack
* which allows us zero dependencies within either quadpack in the loop * which allows us zero dependencies within either quadpack in the loop
*/ */
EXO ( stb $2,-1($6) ) # .. .. .. L : EXO ( stb $2,-1($16) ) # .. .. .. L :
addq $7,1,$7 # .. .. E .. : quadpack as the load addq $17,1,$17 # .. .. E .. : quadpack as the load
subq $0,1,$0 # .. E .. .. : change count _after_ copy subq $0,1,$0 # .. E .. .. : change count _after_ copy
bgt $0,$onebyteloop # U .. .. .. : U L U L bgt $0,$onebyteloop # U .. .. .. : U L U L
...@@ -233,7 +219,7 @@ $exitout: # Destination for exception recovery(?) ...@@ -233,7 +219,7 @@ $exitout: # Destination for exception recovery(?)
nop # .. .. .. E nop # .. .. .. E
nop # .. .. E .. nop # .. .. E ..
nop # .. E .. .. nop # .. E .. ..
ret $31,($28),1 # L0 .. .. .. : L U L U ret $31,($26),1 # L0 .. .. .. : L U L U
.end __copy_user .end __copy_user
EXPORT_SYMBOL(__copy_user) EXPORT_SYMBOL(__copy_user)
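The register renumbering above ($7 to $17, $6 to $16, $28 to $26) is what moves alpha's __copy_user onto the standard C calling convention: $16/$17 are the first two argument registers, $26 is the return address, and the leftover byte count comes back in $0. That lets the uaccess header call it directly; roughly (a sketch based on arch/alpha/include/asm/uaccess.h after this series, details may differ):

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	/* __copy_user returns the number of bytes it could NOT copy */
	return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	return __copy_user((__force void *)to, from, len);
}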
...@@ -6,6 +6,7 @@ generic-y += device.h ...@@ -6,6 +6,7 @@ generic-y += device.h
generic-y += div64.h generic-y += div64.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += ftrace.h generic-y += ftrace.h
......
...@@ -24,12 +24,10 @@ ...@@ -24,12 +24,10 @@
#ifndef _ASM_ARC_UACCESS_H #ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H #define _ASM_ARC_UACCESS_H
#include <linux/sched.h>
#include <asm/errno.h>
#include <linux/string.h> /* for generic string functions */ #include <linux/string.h> /* for generic string functions */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (uaccess_kernel())
/* /*
* Algorithmically, for __user_ok() we want to do: * Algorithmically, for __user_ok() we want to do:
...@@ -170,7 +168,7 @@ ...@@ -170,7 +168,7 @@
static inline unsigned long static inline unsigned long
__arc_copy_from_user(void *to, const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
long res = 0; long res = 0;
char val; char val;
...@@ -396,11 +394,8 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n) ...@@ -396,11 +394,8 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
return res; return res;
} }
extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
unsigned long n);
static inline unsigned long static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
long res = 0; long res = 0;
char val; char val;
...@@ -726,24 +721,20 @@ static inline long __arc_strnlen_user(const char __user *s, long n) ...@@ -726,24 +721,20 @@ static inline long __arc_strnlen_user(const char __user *s, long n)
} }
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n) #define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
#define __clear_user(d, n) __arc_clear_user(d, n) #define __clear_user(d, n) __arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n) #define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n) __arc_strnlen_user(s, n) #define __strnlen_user(s, n) __arc_strnlen_user(s, n)
#else #else
extern long arc_copy_from_user_noinline(void *to, const void __user * from,
unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to, extern unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n); unsigned long n);
extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src, extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
long count); long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n); extern long arc_strnlen_user_noinline(const char __user *src, long n);
#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n) arc_clear_user_noinline(d, n) #define __clear_user(d, n) arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n) #define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n) #define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
...@@ -752,6 +743,4 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n); ...@@ -752,6 +743,4 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n);
#include <asm-generic/uaccess.h> #include <asm-generic/uaccess.h>
extern int fixup_exception(struct pt_regs *regs);
#endif #endif
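With the arch reduced to raw_copy_{from,to}_user() plus the INLINE_COPY_* markers, zero-padding of the uncopied tail no longer lives in arch code at all; the generic copy_from_user() does it once for everybody. A simplified sketch of that generic wrapper (modelled on the 4.11-era include/linux/uaccess.h, not a verbatim copy):

static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	/* zero whatever could not be copied so callers never see stale data */
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}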
...@@ -28,20 +28,6 @@ int fixup_exception(struct pt_regs *regs) ...@@ -28,20 +28,6 @@ int fixup_exception(struct pt_regs *regs)
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
long arc_copy_from_user_noinline(void *to, const void __user *from,
unsigned long n)
{
return __arc_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(arc_copy_from_user_noinline);
long arc_copy_to_user_noinline(void __user *to, const void *from,
unsigned long n)
{
return __arc_copy_to_user(to, from, n);
}
EXPORT_SYMBOL(arc_copy_to_user_noinline);
unsigned long arc_clear_user_noinline(void __user *to, unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n) unsigned long n)
{ {
......
...@@ -41,7 +41,6 @@ config ARM ...@@ -41,7 +41,6 @@ config ARM
select HARDIRQS_SW_RESEND select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU
......
...@@ -7,6 +7,7 @@ generic-y += early_ioremap.h ...@@ -7,6 +7,7 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += ioctl.h generic-y += ioctl.h
generic-y += ipcbuf.h generic-y += ipcbuf.h
generic-y += irq_regs.h generic-y += irq_regs.h
......
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
* User space memory access functions * User space memory access functions
*/ */
#include <linux/string.h> #include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/domain.h> #include <asm/domain.h>
#include <asm/unified.h> #include <asm/unified.h>
...@@ -26,28 +24,7 @@ ...@@ -26,28 +24,7 @@
#define __put_user_unaligned __put_user #define __put_user_unaligned __put_user
#endif #endif
#define VERIFY_READ 0 #include <asm/extable.h>
#define VERIFY_WRITE 1
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
/* /*
* These two functions allow hooking accesses to userspace to increase * These two functions allow hooking accesses to userspace to increase
...@@ -271,7 +248,7 @@ static inline void set_fs(mm_segment_t fs) ...@@ -271,7 +248,7 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(type, addr, size) (__range_ok(addr, size) == 0) #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
#define user_addr_max() \ #define user_addr_max() \
(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) (uaccess_kernel() ? ~0UL : get_fs())
/* /*
* The "__xxx" versions of the user access functions do not verify the * The "__xxx" versions of the user access functions do not verify the
...@@ -478,7 +455,7 @@ extern unsigned long __must_check ...@@ -478,7 +455,7 @@ extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n); arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check static inline unsigned long __must_check
__arch_copy_from_user(void *to, const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
unsigned int __ua_flags; unsigned int __ua_flags;
...@@ -494,7 +471,7 @@ extern unsigned long __must_check ...@@ -494,7 +471,7 @@ extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n); __copy_to_user_std(void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check static inline unsigned long __must_check
__arch_copy_to_user(void __user *to, const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
#ifndef CONFIG_UACCESS_WITH_MEMCPY #ifndef CONFIG_UACCESS_WITH_MEMCPY
unsigned int __ua_flags; unsigned int __ua_flags;
...@@ -522,54 +499,22 @@ __clear_user(void __user *addr, unsigned long n) ...@@ -522,54 +499,22 @@ __clear_user(void __user *addr, unsigned long n)
} }
#else #else
#define __arch_copy_from_user(to, from, n) \ static inline unsigned long
(memcpy(to, (void __force *)from, n), 0) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
#define __arch_copy_to_user(to, from, n) \
(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
check_object_size(to, n, false);
return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
check_object_size(to, n, false);
if (likely(access_ok(VERIFY_READ, from, n)))
res = __arch_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
check_object_size(from, n, true); memcpy(to, (const void __force *)from, n);
return 0;
return __arch_copy_to_user(to, from, n);
} }
static inline unsigned long
static inline unsigned long __must_check raw_copy_to_user(void __user *to, const void *from, unsigned long n)
copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
check_object_size(from, n, true); memcpy((void __force *)to, from, n);
return 0;
if (access_ok(VERIFY_WRITE, to, n))
n = __arch_copy_to_user(to, from, n);
return n;
} }
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#define __copy_to_user_inatomic __copy_to_user #endif
#define __copy_from_user_inatomic __copy_from_user #define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{ {
......
...@@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) ...@@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
unsigned long ua_flags; unsigned long ua_flags;
int atomic; int atomic;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { if (uaccess_kernel()) {
memcpy((void *)to, from, n); memcpy((void *)to, from, n);
return 0; return 0;
} }
...@@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n) ...@@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
{ {
unsigned long ua_flags; unsigned long ua_flags;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { if (uaccess_kernel()) {
memset((void *)addr, 0, n); memset((void *)addr, 0, n);
return 0; return 0;
} }
......
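uaccess_kernel() is the new spelling of the open-coded segment_eq(get_fs(), KERNEL_DS) test used above; as far as this series is concerned, the generic definition is essentially:

#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

so the converted sites behave exactly as before, only with a single name for the "are we operating on the kernel address space?" question.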
...@@ -60,7 +60,6 @@ config ARM64 ...@@ -60,7 +60,6 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
......
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H
/*
* The exception table consists of pairs of relative offsets: the first
* is the relative offset to an instruction that is allowed to fault,
* and the second is the relative offset at which the program should
* continue. No registers are modified, so it is entirely up to the
* continuation code to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
int insn, fixup;
};
#define ARCH_HAS_RELATIVE_EXTABLE
extern int fixup_exception(struct pt_regs *regs);
#endif
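With ARCH_HAS_RELATIVE_EXTABLE the insn and fixup fields are 32-bit offsets relative to their own location rather than absolute addresses, which halves the table size on 64-bit and keeps it position-independent. A minimal sketch of how such an entry is resolved, modelled on the helpers in lib/extable.c (the names here are illustrative):

static inline unsigned long
ex_insn_addr(const struct exception_table_entry *x)
{
	/* the offset is relative to the field that stores it */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}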
...@@ -28,38 +28,12 @@ ...@@ -28,38 +28,12 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/kasan-checks.h> #include <linux/kasan-checks.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/compiler.h> #include <asm/compiler.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
* The exception table consists of pairs of relative offsets: the first
* is the relative offset to an instruction that is allowed to fault,
* and the second is the relative offset at which the program should
* continue. No registers are modified, so it is entirely up to the
* continuation code to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
int insn, fixup;
};
#define ARCH_HAS_RELATIVE_EXTABLE
extern int fixup_exception(struct pt_regs *regs);
#define KERNEL_DS (-1UL) #define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
...@@ -357,58 +331,13 @@ do { \ ...@@ -357,58 +331,13 @@ do { \
}) })
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user __arch_copy_from_user
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); #define raw_copy_to_user __arch_copy_to_user
extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
#define INLINE_COPY_TO_USER
static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) #define INLINE_COPY_FROM_USER
{
kasan_check_write(to, n);
check_object_size(to, n, false);
return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
check_object_size(from, n, true);
return __arch_copy_to_user(to, from, n);
}
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
kasan_check_write(to, n);
check_object_size(to, n, false);
if (access_ok(VERIFY_READ, from, n)) {
res = __arch_copy_from_user(to, from, n);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
check_object_size(from, n, true);
if (access_ok(VERIFY_WRITE, to, n)) {
n = __arch_copy_to_user(to, from, n);
}
return n;
}
static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
n = __copy_in_user(to, from, n);
return n;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{ {
......
...@@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page); ...@@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__arch_copy_from_user); EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user); EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__copy_in_user); EXPORT_SYMBOL(raw_copy_in_user);
/* physical memory */ /* physical memory */
EXPORT_SYMBOL(memstart_addr); EXPORT_SYMBOL(memstart_addr);
......
...@@ -64,14 +64,14 @@ ...@@ -64,14 +64,14 @@
.endm .endm
end .req x5 end .req x5
ENTRY(__copy_in_user) ENTRY(raw_copy_in_user)
uaccess_enable_not_uao x3, x4 uaccess_enable_not_uao x3, x4
add end, x0, x2 add end, x0, x2
#include "copy_template.S" #include "copy_template.S"
uaccess_disable_not_uao x3 uaccess_disable_not_uao x3
mov x0, #0 mov x0, #0
ret ret
ENDPROC(__copy_in_user) ENDPROC(raw_copy_in_user)
.section .fixup,"ax" .section .fixup,"ax"
.align 2 .align 2
......
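Renaming __copy_in_user to raw_copy_in_user lets arm64 drop its own copy_in_user() wrapper; the access_ok() checks move into the generic helper, roughly (simplified from the generic uaccess code of this era):

static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	/* both buffers are user pointers, so both ranges get checked */
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;	/* bytes NOT copied, as with copy_{to,from}_user() */
}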
...@@ -5,6 +5,7 @@ generic-y += device.h ...@@ -5,6 +5,7 @@ generic-y += device.h
generic-y += div64.h generic-y += div64.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += futex.h generic-y += futex.h
generic-y += irq_regs.h generic-y += irq_regs.h
generic-y += irq_work.h generic-y += irq_work.h
......
...@@ -8,12 +8,6 @@ ...@@ -8,12 +8,6 @@
#ifndef __ASM_AVR32_UACCESS_H #ifndef __ASM_AVR32_UACCESS_H
#define __ASM_AVR32_UACCESS_H #define __ASM_AVR32_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
typedef struct { typedef struct {
unsigned int is_user_space; unsigned int is_user_space;
} mm_segment_t; } mm_segment_t;
...@@ -72,34 +66,18 @@ static inline void set_fs(mm_segment_t s) ...@@ -72,34 +66,18 @@ static inline void set_fs(mm_segment_t s)
extern __kernel_size_t __copy_user(void *to, const void *from, extern __kernel_size_t __copy_user(void *to, const void *from,
__kernel_size_t n); __kernel_size_t n);
extern __kernel_size_t copy_to_user(void __user *to, const void *from, static inline unsigned long
__kernel_size_t n); raw_copy_to_user(void __user *to, const void *from, unsigned long n)
extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
__kernel_size_t n);
static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
__kernel_size_t n)
{ {
return __copy_user((void __force *)to, from, n); return __copy_user((void __force *)to, from, n);
} }
static inline __kernel_size_t __copy_from_user(void *to, static inline unsigned long
const void __user *from, raw_copy_from_user(void *to, const void __user *from, unsigned long n)
__kernel_size_t n)
{ {
return __copy_user(to, (const void __force *)from, n); return __copy_user(to, (const void __force *)from, n);
} }
static inline __kernel_size_t copy_from_user(void *to, #define INLINE_COPY_FROM_USER
const void __user *from, #define INLINE_COPY_TO_USER
__kernel_size_t n)
{
size_t res = ___copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/* /*
* put_user: - Write a simple value into user space. * put_user: - Write a simple value into user space.
...@@ -329,9 +307,6 @@ extern long __strnlen_user(const char __user *__s, long __n); ...@@ -329,9 +307,6 @@ extern long __strnlen_user(const char __user *__s, long __n);
#define strlen_user(s) strnlen_user(s, ~0UL >> 1) #define strlen_user(s) strnlen_user(s, ~0UL >> 1)
struct exception_table_entry #include <asm/extable.h>
{
unsigned long insn, fixup;
};
#endif /* __ASM_AVR32_UACCESS_H */ #endif /* __ASM_AVR32_UACCESS_H */
...@@ -36,8 +36,6 @@ EXPORT_SYMBOL(copy_page); ...@@ -36,8 +36,6 @@ EXPORT_SYMBOL(copy_page);
/* /*
* Userspace access stuff. * Userspace access stuff.
*/ */
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user); EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user);
......
...@@ -23,21 +23,6 @@ ...@@ -23,21 +23,6 @@
*/ */
.text .text
.align 1 .align 1
.global ___copy_from_user
.type ___copy_from_user, @function
___copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
.size ___copy_from_user, . - ___copy_from_user
.global copy_to_user
.type copy_to_user, @function
copy_to_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r12, r10, r10
.size copy_to_user, . - copy_to_user
.global __copy_user .global __copy_user
.type __copy_user, @function .type __copy_user, @function
__copy_user: __copy_user:
......
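The deleted ___copy_from_user/copy_to_user stubs performed the kernel/privilege check in assembly before falling through to __copy_user; after the conversion that check is made once in the generic wrapper, which looks roughly like this (a simplified sketch, not avr32-specific code):

static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* unlike the from-user side there is no tail zeroing here: on a
	 * fault the destination is user memory, so the caller only learns
	 * how many bytes were left uncopied */
	if (access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_to_user(to, from, n);
	return n;
}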
...@@ -7,6 +7,7 @@ generic-y += device.h ...@@ -7,6 +7,7 @@ generic-y += device.h
generic-y += div64.h generic-y += div64.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += futex.h generic-y += futex.h
generic-y += hw_irq.h generic-y += hw_irq.h
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/string.h> #include <linux/string.h>
...@@ -29,9 +28,6 @@ static inline void set_fs(mm_segment_t fs) ...@@ -29,9 +28,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a) == (b)) #define segment_eq(a, b) ((a) == (b))
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size)) #define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))
/* /*
...@@ -46,22 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; ...@@ -46,22 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) { return 1;
extern int _access_ok(unsigned long addr, unsigned long size); extern int _access_ok(unsigned long addr, unsigned long size);
#endif #endif
/* #include <asm/extable.h>
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -163,41 +144,23 @@ static inline int bad_user_access_length(void) ...@@ -163,41 +144,23 @@ static inline int bad_user_access_length(void)
: "a" (__ptr(ptr))); \ : "a" (__ptr(ptr))); \
}) })
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __must_check static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
memcpy(to, (const void __force *)from, n); memcpy(to, (const void __force *)from, n);
return 0; return 0;
} }
static inline unsigned long __must_check static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
memcpy((void __force *)to, from, n); memcpy((void __force *)to, from, n);
SSYNC(); SSYNC();
return 0; return 0;
} }
static inline unsigned long __must_check #define INLINE_COPY_FROM_USER
copy_from_user(void *to, const void __user *from, unsigned long n) #define INLINE_COPY_TO_USER
{
if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_from_user(to, from, n);
memset(to, 0, n);
return n;
}
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (likely(access_ok(VERIFY_WRITE, to, n)))
return __copy_to_user(to, from, n);
return n;
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
......
...@@ -370,7 +370,7 @@ int _access_ok(unsigned long addr, unsigned long size) ...@@ -370,7 +370,7 @@ int _access_ok(unsigned long addr, unsigned long size)
/* Check that things do not wrap around */ /* Check that things do not wrap around */
if (addr > ULONG_MAX - size) if (addr > ULONG_MAX - size)
return 0; return 0;
if (segment_eq(get_fs(), KERNEL_DS)) if (uaccess_kernel())
return 1; return 1;
#ifdef CONFIG_MTD_UCLINUX #ifdef CONFIG_MTD_UCLINUX
if (1) if (1)
......
...@@ -12,6 +12,7 @@ generic-y += dma.h ...@@ -12,6 +12,7 @@ generic-y += dma.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += futex.h generic-y += futex.h
......
...@@ -13,17 +13,11 @@ ...@@ -13,17 +13,11 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/string.h> #include <linux/string.h>
#ifdef CONFIG_ACCESS_CHECK
#define __access_ok _access_ok
#endif
/* /*
* __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h
*
* C6X supports unaligned 32 and 64 bit loads and stores. * C6X supports unaligned 32 and 64 bit loads and stores.
*/ */
static inline __must_check long __copy_from_user(void *to, static inline __must_check unsigned long
const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
u32 tmp32; u32 tmp32;
u64 tmp64; u64 tmp64;
...@@ -58,8 +52,8 @@ static inline __must_check long __copy_from_user(void *to, ...@@ -58,8 +52,8 @@ static inline __must_check long __copy_from_user(void *to,
return 0; return 0;
} }
static inline __must_check long __copy_to_user(void __user *to, static inline __must_check unsigned long
const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
u32 tmp32; u32 tmp32;
u64 tmp64; u64 tmp64;
...@@ -93,9 +87,8 @@ static inline __must_check long __copy_to_user(void __user *to, ...@@ -93,9 +87,8 @@ static inline __must_check long __copy_to_user(void __user *to,
memcpy((void __force *)to, from, n); memcpy((void __force *)to, from, n);
return 0; return 0;
} }
#define INLINE_COPY_FROM_USER
#define __copy_to_user __copy_to_user #define INLINE_COPY_TO_USER
#define __copy_from_user __copy_from_user
extern int _access_ok(unsigned long addr, unsigned long size); extern int _access_ok(unsigned long addr, unsigned long size);
#ifdef CONFIG_ACCESS_CHECK #ifdef CONFIG_ACCESS_CHECK
......
...@@ -23,7 +23,7 @@ int _access_ok(unsigned long addr, unsigned long size) ...@@ -23,7 +23,7 @@ int _access_ok(unsigned long addr, unsigned long size)
if (!addr || addr > (0xffffffffUL - (size - 1))) if (!addr || addr > (0xffffffffUL - (size - 1)))
goto _bad_access; goto _bad_access;
if (segment_eq(get_fs(), KERNEL_DS)) if (uaccess_kernel())
return 1; return 1;
if (memory_start <= addr && (addr + size - 1) < memory_end) if (memory_start <= addr && (addr + size - 1) < memory_end)
......
...@@ -188,11 +188,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) ...@@ -188,11 +188,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
} }
EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__copy_user);
/* Copy from user to kernel, zeroing the bytes that were inaccessible in /* Copy from user to kernel. The return-value is the number of bytes that were
userland. The return-value is the number of bytes that were
inaccessible. */ inaccessible. */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long __copy_user_in(void *pdst, const void __user *psrc,
unsigned long pn) unsigned long pn)
{ {
/* We want the parameters put in special registers. /* We want the parameters put in special registers.
...@@ -217,19 +216,17 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, ...@@ -217,19 +216,17 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
{ {
__asm_copy_from_user_1 (dst, src, retn); __asm_copy_from_user_1 (dst, src, retn);
n--; n--;
if (retn)
goto exception;
} }
if (((unsigned long) src & 2) && n >= 2) if (((unsigned long) src & 2) && n >= 2)
{ {
__asm_copy_from_user_2 (dst, src, retn); __asm_copy_from_user_2 (dst, src, retn);
n -= 2; n -= 2;
if (retn)
goto exception;
} }
/* We only need one check after the unalignment-adjustments, because
if both adjustments were done, either both or neither reference
had an exception. */
if (retn != 0)
goto copy_exception_bytes;
} }
/* Decide which copying method to use. */ /* Decide which copying method to use. */
...@@ -328,7 +325,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, ...@@ -328,7 +325,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4; n -= 4;
if (retn) if (retn)
goto copy_exception_bytes; goto exception;
} }
/* If we get here, there were no memory read faults. */ /* If we get here, there were no memory read faults. */
...@@ -356,20 +353,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, ...@@ -356,20 +353,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
bytes. */ bytes. */
return retn; return retn;
copy_exception_bytes: exception:
/* We already have "retn" bytes cleared, and need to clear the
remaining "n" bytes. A non-optimized simple byte-for-byte in-line
memset is preferred here, since this isn't speed-critical code and
we'd rather have this a leaf-function than calling memset. */
{
char *endp;
for (endp = dst + n; dst < endp; dst++)
*dst = 0;
}
return retn + n; return retn + n;
} }
EXPORT_SYMBOL(__copy_user_zeroing); EXPORT_SYMBOL(__copy_user_in);
/* Zero userspace. */ /* Zero userspace. */
unsigned long __do_clear_user(void __user *pto, unsigned long pn) unsigned long __do_clear_user(void __user *pto, unsigned long pn)
......
...@@ -156,10 +156,9 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) ...@@ -156,10 +156,9 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
} }
EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__copy_user);
/* Copy from user to kernel, zeroing the bytes that were inaccessible in /* Copy from user to kernel. The return-value is the number of bytes that were
userland. The return-value is the number of bytes that were
inaccessible. */ inaccessible. */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long __copy_user_in(void *pdst, const void __user *psrc,
unsigned long pn) unsigned long pn)
{ {
/* We want the parameters put in special registers. /* We want the parameters put in special registers.
...@@ -184,19 +183,18 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, ...@@ -184,19 +183,18 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
{ {
__asm_copy_from_user_1 (dst, src, retn); __asm_copy_from_user_1 (dst, src, retn);
n--; n--;
if (retn != 0)
goto exception;
} }
if (((unsigned long) src & 2) && n >= 2) if (((unsigned long) src & 2) && n >= 2)
{ {
__asm_copy_from_user_2 (dst, src, retn); __asm_copy_from_user_2 (dst, src, retn);
n -= 2; n -= 2;
if (retn != 0)
goto exception;
} }
/* We only need one check after the unalignment-adjustments, because
if both adjustments were done, either both or neither reference
had an exception. */
if (retn != 0)
goto copy_exception_bytes;
} }
/* Movem is dirt cheap. The overhead is low enough to always use the /* Movem is dirt cheap. The overhead is low enough to always use the
...@@ -279,7 +277,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, ...@@ -279,7 +277,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4; n -= 4;
if (retn) if (retn)
goto copy_exception_bytes; goto exception;
} }
/* If we get here, there were no memory read faults. */ /* If we get here, there were no memory read faults. */
...@@ -307,20 +305,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, ...@@ -307,20 +305,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
bytes. */ bytes. */
return retn; return retn;
copy_exception_bytes: exception:
/* We already have "retn" bytes cleared, and need to clear the
remaining "n" bytes. A non-optimized simple byte-for-byte in-line
memset is preferred here, since this isn't speed-critical code and
we'd rather have this a leaf-function than calling memset. */
{
char *endp;
for (endp = dst + n; dst < endp; dst++)
*dst = 0;
}
return retn + n; return retn + n;
} }
EXPORT_SYMBOL(__copy_user_zeroing); EXPORT_SYMBOL(__copy_user_in);
/* Zero userspace. */ /* Zero userspace. */
unsigned long __do_clear_user(void __user *pto, unsigned long pn) unsigned long __do_clear_user(void __user *pto, unsigned long pn)
......
...@@ -172,16 +172,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -172,16 +172,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_user_cont(to, from, ret, \ __asm_copy_user_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"2: move.b $r9,[%0+]\n", \ "2: move.b $r9,[%0+]\n", \
"3: addq 1,%2\n" \ "3: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 2b,3b\n") " .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \ __asm_copy_user_cont(to, from, ret, \
" move.w [%1+],$r9\n" \ " move.w [%1+],$r9\n" \
"2: move.w $r9,[%0+]\n" COPY, \ "2: move.w $r9,[%0+]\n" COPY, \
"3: addq 2,%2\n" \ "3: addq 2,%2\n" FIXUP, \
" clear.w [%0+]\n" FIXUP, \
" .dword 2b,3b\n" TENTRY) " .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \ #define __asm_copy_from_user_2(to, from, ret) \
...@@ -191,16 +189,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -191,16 +189,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_2x_cont(to, from, ret, \ __asm_copy_from_user_2x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \ "4: move.b $r9,[%0+]\n", \
"5: addq 1,%2\n" \ "5: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 4b,5b\n") " .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \ __asm_copy_user_cont(to, from, ret, \
" move.d [%1+],$r9\n" \ " move.d [%1+],$r9\n" \
"2: move.d $r9,[%0+]\n" COPY, \ "2: move.d $r9,[%0+]\n" COPY, \
"3: addq 4,%2\n" \ "3: addq 4,%2\n" FIXUP, \
" clear.d [%0+]\n" FIXUP, \
" .dword 2b,3b\n" TENTRY) " .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \ #define __asm_copy_from_user_4(to, from, ret) \
...@@ -210,8 +206,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -210,8 +206,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_4x_cont(to, from, ret, \ __asm_copy_from_user_4x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \ "4: move.b $r9,[%0+]\n", \
"5: addq 1,%2\n" \ "5: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 4b,5b\n") " .dword 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -219,7 +214,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -219,7 +214,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w [%1+],$r9\n" \ " move.w [%1+],$r9\n" \
"4: move.w $r9,[%0+]\n" COPY, \ "4: move.w $r9,[%0+]\n" COPY, \
"5: addq 2,%2\n" \ "5: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \ FIXUP, \
" .dword 4b,5b\n" TENTRY) " .dword 4b,5b\n" TENTRY)
#define __asm_copy_from_user_6(to, from, ret) \ #define __asm_copy_from_user_6(to, from, ret) \
...@@ -229,8 +224,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -229,8 +224,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_6x_cont(to, from, ret, \ __asm_copy_from_user_6x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"6: move.b $r9,[%0+]\n", \ "6: move.b $r9,[%0+]\n", \
"7: addq 1,%2\n" \ "7: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 6b,7b\n") " .dword 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -238,7 +232,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -238,7 +232,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \ " move.d [%1+],$r9\n" \
"4: move.d $r9,[%0+]\n" COPY, \ "4: move.d $r9,[%0+]\n" COPY, \
"5: addq 4,%2\n" \ "5: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \ FIXUP, \
" .dword 4b,5b\n" TENTRY) " .dword 4b,5b\n" TENTRY)
#define __asm_copy_from_user_8(to, from, ret) \ #define __asm_copy_from_user_8(to, from, ret) \
...@@ -248,8 +242,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -248,8 +242,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_8x_cont(to, from, ret, \ __asm_copy_from_user_8x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"6: move.b $r9,[%0+]\n", \ "6: move.b $r9,[%0+]\n", \
"7: addq 1,%2\n" \ "7: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 6b,7b\n") " .dword 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -257,7 +250,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -257,7 +250,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w [%1+],$r9\n" \ " move.w [%1+],$r9\n" \
"6: move.w $r9,[%0+]\n" COPY, \ "6: move.w $r9,[%0+]\n" COPY, \
"7: addq 2,%2\n" \ "7: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \ FIXUP, \
" .dword 6b,7b\n" TENTRY) " .dword 6b,7b\n" TENTRY)
#define __asm_copy_from_user_10(to, from, ret) \ #define __asm_copy_from_user_10(to, from, ret) \
...@@ -267,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -267,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_10x_cont(to, from, ret, \ __asm_copy_from_user_10x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"8: move.b $r9,[%0+]\n", \ "8: move.b $r9,[%0+]\n", \
"9: addq 1,%2\n" \ "9: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 8b,9b\n") " .dword 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -276,7 +268,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -276,7 +268,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \ " move.d [%1+],$r9\n" \
"6: move.d $r9,[%0+]\n" COPY, \ "6: move.d $r9,[%0+]\n" COPY, \
"7: addq 4,%2\n" \ "7: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \ FIXUP, \
" .dword 6b,7b\n" TENTRY) " .dword 6b,7b\n" TENTRY)
#define __asm_copy_from_user_12(to, from, ret) \ #define __asm_copy_from_user_12(to, from, ret) \
...@@ -286,8 +278,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -286,8 +278,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_12x_cont(to, from, ret, \ __asm_copy_from_user_12x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"8: move.b $r9,[%0+]\n", \ "8: move.b $r9,[%0+]\n", \
"9: addq 1,%2\n" \ "9: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 8b,9b\n") " .dword 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -295,7 +286,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -295,7 +286,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w [%1+],$r9\n" \ " move.w [%1+],$r9\n" \
"8: move.w $r9,[%0+]\n" COPY, \ "8: move.w $r9,[%0+]\n" COPY, \
"9: addq 2,%2\n" \ "9: addq 2,%2\n" \
" clear.w [%0+]\n" FIXUP, \ FIXUP, \
" .dword 8b,9b\n" TENTRY) " .dword 8b,9b\n" TENTRY)
#define __asm_copy_from_user_14(to, from, ret) \ #define __asm_copy_from_user_14(to, from, ret) \
...@@ -305,8 +296,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -305,8 +296,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_14x_cont(to, from, ret, \ __asm_copy_from_user_14x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \ " move.b [%1+],$r9\n" \
"10: move.b $r9,[%0+]\n", \ "10: move.b $r9,[%0+]\n", \
"11: addq 1,%2\n" \ "11: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 10b,11b\n") " .dword 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -314,7 +304,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -314,7 +304,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \ " move.d [%1+],$r9\n" \
"8: move.d $r9,[%0+]\n" COPY, \ "8: move.d $r9,[%0+]\n" COPY, \
"9: addq 4,%2\n" \ "9: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \ FIXUP, \
" .dword 8b,9b\n" TENTRY) " .dword 8b,9b\n" TENTRY)
#define __asm_copy_from_user_16(to, from, ret) \ #define __asm_copy_from_user_16(to, from, ret) \
...@@ -325,7 +315,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -325,7 +315,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \ " move.d [%1+],$r9\n" \
"10: move.d $r9,[%0+]\n" COPY, \ "10: move.d $r9,[%0+]\n" COPY, \
"11: addq 4,%2\n" \ "11: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \ FIXUP, \
" .dword 10b,11b\n" TENTRY) " .dword 10b,11b\n" TENTRY)
#define __asm_copy_from_user_20(to, from, ret) \ #define __asm_copy_from_user_20(to, from, ret) \
...@@ -336,7 +326,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -336,7 +326,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \ " move.d [%1+],$r9\n" \
"12: move.d $r9,[%0+]\n" COPY, \ "12: move.d $r9,[%0+]\n" COPY, \
"13: addq 4,%2\n" \ "13: addq 4,%2\n" \
" clear.d [%0+]\n" FIXUP, \ FIXUP, \
" .dword 12b,13b\n" TENTRY) " .dword 12b,13b\n" TENTRY)
#define __asm_copy_from_user_24(to, from, ret) \ #define __asm_copy_from_user_24(to, from, ret) \
......
...@@ -178,8 +178,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -178,8 +178,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"2: move.b [%1+],$acr\n" \ "2: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"3: addq 1,%2\n" \ "3: addq 1,%2\n" \
" jump 1b\n" \ " jump 1b\n", \
" clear.b [%0+]\n", \
" .dword 2b,3b\n") " .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -189,8 +188,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -189,8 +188,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w $acr,[%0+]\n", \ " move.w $acr,[%0+]\n", \
FIXUP \ FIXUP \
"3: addq 2,%2\n" \ "3: addq 2,%2\n" \
" jump 1b\n" \ " jump 1b\n", \
" clear.w [%0+]\n", \
TENTRY \ TENTRY \
" .dword 2b,3b\n") " .dword 2b,3b\n")
...@@ -201,8 +199,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -201,8 +199,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_2x_cont(to, from, ret, \ __asm_copy_from_user_2x_cont(to, from, ret, \
"4: move.b [%1+],$acr\n" \ "4: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"5: addq 1,%2\n" \ "5: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 4b,5b\n") " .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -212,8 +209,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -212,8 +209,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d $acr,[%0+]\n", \ " move.d $acr,[%0+]\n", \
FIXUP \ FIXUP \
"3: addq 4,%2\n" \ "3: addq 4,%2\n" \
" jump 1b\n" \ " jump 1b\n", \
" clear.d [%0+]\n", \
TENTRY \ TENTRY \
" .dword 2b,3b\n") " .dword 2b,3b\n")
...@@ -224,8 +220,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -224,8 +220,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_4x_cont(to, from, ret, \ __asm_copy_from_user_4x_cont(to, from, ret, \
"4: move.b [%1+],$acr\n" \ "4: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"5: addq 1,%2\n" \ "5: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 4b,5b\n") " .dword 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -234,8 +229,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -234,8 +229,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"4: move.w [%1+],$acr\n" \ "4: move.w [%1+],$acr\n" \
" move.w $acr,[%0+]\n", \ " move.w $acr,[%0+]\n", \
FIXUP \ FIXUP \
"5: addq 2,%2\n" \ "5: addq 2,%2\n", \
" clear.w [%0+]\n", \
TENTRY \ TENTRY \
" .dword 4b,5b\n") " .dword 4b,5b\n")
...@@ -246,8 +240,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -246,8 +240,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_6x_cont(to, from, ret, \ __asm_copy_from_user_6x_cont(to, from, ret, \
"6: move.b [%1+],$acr\n" \ "6: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"7: addq 1,%2\n" \ "7: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 6b,7b\n") " .dword 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -256,8 +249,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -256,8 +249,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"4: move.d [%1+],$acr\n" \ "4: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \ " move.d $acr,[%0+]\n", \
FIXUP \ FIXUP \
"5: addq 4,%2\n" \ "5: addq 4,%2\n", \
" clear.d [%0+]\n", \
TENTRY \ TENTRY \
" .dword 4b,5b\n") " .dword 4b,5b\n")
...@@ -268,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -268,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_8x_cont(to, from, ret, \ __asm_copy_from_user_8x_cont(to, from, ret, \
"6: move.b [%1+],$acr\n" \ "6: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"7: addq 1,%2\n" \ "7: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 6b,7b\n") " .dword 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -278,8 +269,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -278,8 +269,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"6: move.w [%1+],$acr\n" \ "6: move.w [%1+],$acr\n" \
" move.w $acr,[%0+]\n", \ " move.w $acr,[%0+]\n", \
FIXUP \ FIXUP \
"7: addq 2,%2\n" \ "7: addq 2,%2\n", \
" clear.w [%0+]\n", \
TENTRY \ TENTRY \
" .dword 6b,7b\n") " .dword 6b,7b\n")
...@@ -290,8 +280,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -290,8 +280,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_10x_cont(to, from, ret, \ __asm_copy_from_user_10x_cont(to, from, ret, \
"8: move.b [%1+],$acr\n" \ "8: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"9: addq 1,%2\n" \ "9: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 8b,9b\n") " .dword 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -300,8 +289,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -300,8 +289,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"6: move.d [%1+],$acr\n" \ "6: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \ " move.d $acr,[%0+]\n", \
FIXUP \ FIXUP \
"7: addq 4,%2\n" \ "7: addq 4,%2\n", \
" clear.d [%0+]\n", \
TENTRY \ TENTRY \
" .dword 6b,7b\n") " .dword 6b,7b\n")
...@@ -312,8 +300,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -312,8 +300,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_12x_cont(to, from, ret, \ __asm_copy_from_user_12x_cont(to, from, ret, \
"8: move.b [%1+],$acr\n" \ "8: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"9: addq 1,%2\n" \ "9: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 8b,9b\n") " .dword 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -322,8 +309,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -322,8 +309,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"8: move.w [%1+],$acr\n" \ "8: move.w [%1+],$acr\n" \
" move.w $acr,[%0+]\n", \ " move.w $acr,[%0+]\n", \
FIXUP \ FIXUP \
"9: addq 2,%2\n" \ "9: addq 2,%2\n", \
" clear.w [%0+]\n", \
TENTRY \ TENTRY \
" .dword 8b,9b\n") " .dword 8b,9b\n")
...@@ -334,8 +320,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -334,8 +320,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_14x_cont(to, from, ret, \ __asm_copy_from_user_14x_cont(to, from, ret, \
"10: move.b [%1+],$acr\n" \ "10: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \ " move.b $acr,[%0+]\n", \
"11: addq 1,%2\n" \ "11: addq 1,%2\n", \
" clear.b [%0+]\n", \
" .dword 10b,11b\n") " .dword 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
...@@ -344,8 +329,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -344,8 +329,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"8: move.d [%1+],$acr\n" \ "8: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \ " move.d $acr,[%0+]\n", \
FIXUP \ FIXUP \
"9: addq 4,%2\n" \ "9: addq 4,%2\n", \
" clear.d [%0+]\n", \
TENTRY \ TENTRY \
" .dword 8b,9b\n") " .dword 8b,9b\n")
...@@ -358,8 +342,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -358,8 +342,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"10: move.d [%1+],$acr\n" \ "10: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \ " move.d $acr,[%0+]\n", \
FIXUP \ FIXUP \
"11: addq 4,%2\n" \ "11: addq 4,%2\n", \
" clear.d [%0+]\n", \
TENTRY \ TENTRY \
" .dword 10b,11b\n") " .dword 10b,11b\n")
...@@ -372,8 +355,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) ...@@ -372,8 +355,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"12: move.d [%1+],$acr\n" \ "12: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \ " move.d $acr,[%0+]\n", \
FIXUP \ FIXUP \
"13: addq 4,%2\n" \ "13: addq 4,%2\n", \
" clear.d [%0+]\n", \
TENTRY \ TENTRY \
" .dword 12b,13b\n") " .dword 12b,13b\n")
......
...@@ -9,6 +9,7 @@ generic-y += device.h ...@@ -9,6 +9,7 @@ generic-y += device.h
generic-y += div64.h generic-y += div64.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += futex.h generic-y += futex.h
......
...@@ -15,15 +15,9 @@ ...@@ -15,15 +15,9 @@
#ifndef _CRIS_UACCESS_H #ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H #define _CRIS_UACCESS_H
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with * performed or not. If get_fs() == USER_DS, checking is performed, with
...@@ -49,30 +43,14 @@ ...@@ -49,30 +43,14 @@
#define segment_eq(a, b) ((a).seg == (b).seg) #define segment_eq(a, b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \ #define __user_ok(addr, size) \
(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size))) (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
#include <arch/uaccess.h> #include <arch/uaccess.h>
#include <asm/extable.h>
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -191,7 +169,7 @@ extern long __get_user_bad(void); ...@@ -191,7 +169,7 @@ extern long __get_user_bad(void);
live in lib/usercopy.c */ live in lib/usercopy.c */
extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n); extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n); extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline long static inline long
...@@ -258,7 +236,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) ...@@ -258,7 +236,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
else if (n == 24) else if (n == 24)
__asm_copy_from_user_24(to, from, ret); __asm_copy_from_user_24(to, from, ret);
else else
ret = __copy_user_zeroing(to, from, n); ret = __copy_user_in(to, from, n);
return ret; return ret;
} }
...@@ -358,64 +336,33 @@ static inline size_t clear_user(void __user *to, size_t n) ...@@ -358,64 +336,33 @@ static inline size_t clear_user(void __user *to, size_t n)
return __do_clear_user(to, n); return __do_clear_user(to, n);
} }
static inline size_t copy_from_user(void *to, const void __user *from, size_t n) static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
if (unlikely(!access_ok(VERIFY_READ, from, n))) {
memset(to, 0, n);
return n;
}
if (__builtin_constant_p(n)) if (__builtin_constant_p(n))
return __constant_copy_from_user(to, from, n); return __constant_copy_from_user(to, from, n);
else else
return __copy_user_zeroing(to, from, n); return __copy_user_in(to, from, n);
} }
static inline size_t copy_to_user(void __user *to, const void *from, size_t n) static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
if (__builtin_constant_p(n)) if (__builtin_constant_p(n))
return __constant_copy_to_user(to, from, n); return __constant_copy_to_user(to, from, n);
else else
return __copy_user(to, from, n); return __copy_user(to, from, n);
} }
/* We let the __ versions of copy_from/to_user inline, because they're often #define INLINE_COPY_FROM_USER
* used in fast paths and have only a small space overhead. #define INLINE_COPY_TO_USER
*/
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void __user *from,
unsigned long n)
{
return __copy_user_zeroing(to, from, n);
}
static inline unsigned long
__generic_copy_to_user_nocheck(void __user *to, const void *from,
unsigned long n)
{
return __copy_user(to, from, n);
}
static inline unsigned long static inline unsigned long
__generic_clear_user_nocheck(void __user *to, unsigned long n) __clear_user(void __user *to, unsigned long n)
{ {
return __do_clear_user(to, n); return __do_clear_user(to, n);
} }
/* without checking */
#define __copy_to_user(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))
#define strlen_user(str) strnlen_user((str), 0x7ffffffe) #define strlen_user(str) strnlen_user((str), 0x7ffffffe)
#endif /* __ASSEMBLY__ */
#endif /* _CRIS_UACCESS_H */ #endif /* _CRIS_UACCESS_H */
generic-y += clkdev.h generic-y += clkdev.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h generic-y += irq_work.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
......
...@@ -15,16 +15,13 @@ ...@@ -15,16 +15,13 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/extable.h>
#define __ptr(x) ((unsigned long __force *)(x)) #define __ptr(x) ((unsigned long __force *)(x))
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* check that a range of addresses falls within the current address limit * check that a range of addresses falls within the current address limit
*/ */
...@@ -63,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size) ...@@ -63,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0) #define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0) #define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -256,61 +233,50 @@ do { \ ...@@ -256,61 +233,50 @@ do { \
/* /*
* *
*/ */
#define ____force(x) (__force void *)(void __user *)(x) #define ____force(x) (__force void *)(void __user *)(x)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
extern long __memset_user(void *dst, unsigned long count); extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count); extern long __memcpy_user(void *dst, const void *src, unsigned long count);
#define __clear_user(dst,count) __memset_user(____force(dst), (count)) #define __clear_user(dst,count) __memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else #else
#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) #define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif #endif
static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
if (likely(__access_ok(to, n)))
n = __clear_user(to, n);
return n;
}
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return __copy_to_user_inatomic(to, from, n);
}
static inline unsigned long static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
might_fault(); #ifdef CONFIG_MMU
return __copy_from_user_inatomic(to, from, n); return __memcpy_user(to, (__force const void *)from, n);
#else
memcpy(to, (__force const void *)from, n);
return 0;
#endif
} }
static inline long copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
unsigned long ret = n; #ifdef CONFIG_MMU
return __memcpy_user((__force void *)to, from, n);
if (likely(__access_ok(from, n))) #else
ret = __copy_from_user(to, from, n); memcpy((__force void *)to, from, n);
return 0;
if (unlikely(ret != 0)) #endif
memset(to + (n - ret), 0, ret);
return ret;
} }
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
static inline long copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{ {
return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n; if (likely(__access_ok(to, n)))
n = __clear_user(to, n);
return n;
} }
extern long strncpy_from_user(char *dst, const char __user *src, long count); extern long strncpy_from_user(char *dst, const char __user *src, long count);
...@@ -318,6 +284,4 @@ extern long strnlen_user(const char __user *src, long count); ...@@ -318,6 +284,4 @@ extern long strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767) #define strlen_user(str) strnlen_user(str, 32767)
extern unsigned long search_exception_table(unsigned long addr);
#endif /* _ASM_UACCESS_H */ #endif /* _ASM_UACCESS_H */
...@@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0, ...@@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
siginfo_t info; siginfo_t info;
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
unsigned long fixup; if (fixup_exception(__frame))
fixup = search_exception_table(__frame->pc);
if (fixup) {
__frame->pc = fixup;
return; return;
}
#endif #endif
die_if_kernel("-- Memory Access Exception --\n" die_if_kernel("-- Memory Access Exception --\n"
......
...@@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand ...@@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand
extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler; extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
extern spinlock_t modlist_lock; extern spinlock_t modlist_lock;
int fixup_exception(struct pt_regs *regs)
/*****************************************************************************/
/*
* see if there's a fixup handler available to deal with a kernel fault
*/
unsigned long search_exception_table(unsigned long pc)
{ {
const struct exception_table_entry *extab; const struct exception_table_entry *extab;
unsigned long pc = regs->pc;
/* determine if the fault lay during a memcpy_user or a memset_user */ /* determine if the fault lay during a memcpy_user or a memset_user */
if (__frame->lr == (unsigned long) &__memset_user_error_lr && if (regs->lr == (unsigned long) &__memset_user_error_lr &&
(unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
) { ) {
/* the fault occurred in a protected memset /* the fault occurred in a protected memset
* - we search for the return address (in LR) instead of the program counter * - we search for the return address (in LR) instead of the program counter
* - it was probably during a clear_user() * - it was probably during a clear_user()
*/ */
return (unsigned long) &__memset_user_error_handler; regs->pc = (unsigned long) &__memset_user_error_handler;
return 1;
} }
if (__frame->lr == (unsigned long) &__memcpy_user_error_lr && if (regs->lr == (unsigned long) &__memcpy_user_error_lr &&
(unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
) { ) {
/* the fault occurred in a protected memset /* the fault occurred in a protected memset
* - we search for the return address (in LR) instead of the program counter * - we search for the return address (in LR) instead of the program counter
* - it was probably during a copy_to/from_user() * - it was probably during a copy_to/from_user()
*/ */
return (unsigned long) &__memcpy_user_error_handler; regs->pc = (unsigned long) &__memcpy_user_error_handler;
return 1;
} }
extab = search_exception_tables(pc); extab = search_exception_tables(pc);
if (extab) if (extab) {
return extab->fixup; regs->pc = extab->fixup;
return 1;
}
return 0; return 0;
}
} /* end search_exception_table() */
...@@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear ...@@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct mm_struct *mm; struct mm_struct *mm;
unsigned long _pme, lrai, lrad, fixup; unsigned long _pme, lrai, lrad;
unsigned long flags = 0; unsigned long flags = 0;
siginfo_t info; siginfo_t info;
pgd_t *pge; pgd_t *pge;
...@@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear ...@@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
no_context: no_context:
/* are we prepared to handle this kernel fault? */ /* are we prepared to handle this kernel fault? */
if ((fixup = search_exception_table(__frame->pc)) != 0) { if (fixup_exception(__frame))
__frame->pc = fixup;
return; return;
}
/* /*
* Oops. The kernel tried to access some bad page. We'll have to * Oops. The kernel tried to access some bad page. We'll have to
......
...@@ -13,6 +13,7 @@ generic-y += dma.h ...@@ -13,6 +13,7 @@ generic-y += dma.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += ftrace.h generic-y += ftrace.h
...@@ -68,7 +69,6 @@ generic-y += tlbflush.h ...@@ -68,7 +69,6 @@ generic-y += tlbflush.h
generic-y += trace_clock.h generic-y += trace_clock.h
generic-y += topology.h generic-y += topology.h
generic-y += types.h generic-y += types.h
generic-y += uaccess.h
generic-y += ucontext.h generic-y += ucontext.h
generic-y += unaligned.h generic-y += unaligned.h
generic-y += vga.h generic-y += vga.h
......
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
#include <linux/string.h>
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 *)to = *(u8 __force *)from;
return 0;
case 2:
*(u16 *)to = *(u16 __force *)from;
return 0;
case 4:
*(u32 *)to = *(u32 __force *)from;
return 0;
}
}
memcpy(to, (const void __force *)from, n);
return 0;
}
static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
memcpy((void __force *)to, from, n);
return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h>
#endif
...@@ -11,6 +11,7 @@ generic-y += device.h ...@@ -11,6 +11,7 @@ generic-y += device.h
generic-y += div64.h generic-y += div64.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += ftrace.h generic-y += ftrace.h
......
...@@ -23,7 +23,6 @@ ...@@ -23,7 +23,6 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -50,8 +49,6 @@ ...@@ -50,8 +49,6 @@
* reasonably simple and not *too* slow. After all, we've got the * reasonably simple and not *too* slow. After all, we've got the
* MMU for backup. * MMU for backup.
*/ */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define __access_ok(addr, size) \ #define __access_ok(addr, size) \
((get_fs().seg == KERNEL_DS.seg) || \ ((get_fs().seg == KERNEL_DS.seg) || \
...@@ -68,19 +65,12 @@ ...@@ -68,19 +65,12 @@
*/ */
/* Assembly somewhat optimized copy routines */ /* Assembly somewhat optimized copy routines */
unsigned long __copy_from_user_hexagon(void *to, const void __user *from, unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n); unsigned long n);
unsigned long __copy_to_user_hexagon(void __user *to, const void *from, unsigned long raw_copy_to_user(void __user *to, const void *from,
unsigned long n); unsigned long n);
#define INLINE_COPY_FROM_USER
#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n) #define INLINE_COPY_TO_USER
#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n)
/*
* XXX todo: some additonal performance gain is possible by
* implementing __copy_to/from_user_inatomic, which is much
* like __copy_to/from_user, but performs slightly less checking.
*/
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count); __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s)) #define __clear_user(a, s) __clear_user_hexagon((a), (s))
...@@ -107,10 +97,14 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, ...@@ -107,10 +97,14 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
return -EFAULT; return -EFAULT;
if (res > n) { if (res > n) {
copy_from_user(dst, src, n); long left = raw_copy_from_user(dst, src, n);
if (unlikely(left))
memset(dst + (n - left), 0, left);
return n; return n;
} else { } else {
copy_from_user(dst, src, res); long left = raw_copy_from_user(dst, src, res);
if (unlikely(left))
memset(dst + (res - left), 0, left);
return res-1; return res-1;
} }
} }
......
...@@ -25,8 +25,8 @@ ...@@ -25,8 +25,8 @@
/* Additional functions */ /* Additional functions */
EXPORT_SYMBOL(__clear_user_hexagon); EXPORT_SYMBOL(__clear_user_hexagon);
EXPORT_SYMBOL(__copy_from_user_hexagon); EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(__copy_to_user_hexagon); EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(__iounmap); EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__vmgetie); EXPORT_SYMBOL(__vmgetie);
......
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
#define bytes r2 #define bytes r2
#define loopcount r5 #define loopcount r5
#define FUNCNAME __copy_from_user_hexagon #define FUNCNAME raw_copy_from_user
#include "copy_user_template.S" #include "copy_user_template.S"
/* LOAD FAULTS from COPY_FROM_USER */ /* LOAD FAULTS from COPY_FROM_USER */
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
#define bytes r2 #define bytes r2
#define loopcount r5 #define loopcount r5
#define FUNCNAME __copy_to_user_hexagon #define FUNCNAME raw_copy_to_user
#include "copy_user_template.S" #include "copy_user_template.S"
/* STORE FAULTS from COPY_TO_USER */ /* STORE FAULTS from COPY_TO_USER */
......
...@@ -52,7 +52,6 @@ config IA64 ...@@ -52,7 +52,6 @@ config IA64
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
default y default y
help help
The Itanium Processor Family is Intel's 64-bit successor to The Itanium Processor Family is Intel's 64-bit successor to
......
#ifndef _ASM_IA64_EXTABLE_H
#define _ASM_IA64_EXTABLE_H
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
int insn; /* location-relative address of insn this fixup is for */
int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};
#endif
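The new header switches ia64 to location-relative 32-bit entries (ARCH_HAS_RELATIVE_EXTABLE) and, per its own comment, reuses bit 2 of the fixup offset as a flag meaning "zero r9". A hedged sketch of how such an entry could be decoded back into absolute addresses; this illustrates the relative-extable idea and is not the actual ia64 fault handler:

/* Hedged sketch of decoding a location-relative entry; the struct layout
 * mirrors the header above, the helper names and the mask are assumptions.
 */
struct rel_extable_entry {
	int insn;	/* offset of the faulting insn, relative to &insn */
	int fixup;	/* offset of the continuation, relative to &fixup */
};

static inline unsigned long ex_insn_addr(const struct rel_extable_entry *e)
{
	return (unsigned long)&e->insn + e->insn;
}

static inline unsigned long ex_fixup_addr(const struct rel_extable_entry *e)
{
	/* bit 2 is the "zero r9" flag from the comment above; strip it */
	return ((unsigned long)&e->fixup + e->fixup) & ~4UL;
}

static inline int ex_fixup_zeroes_r9(const struct rel_extable_entry *e)
{
	/* assumes the continuation address is aligned enough that bit 2
	 * of the decoded value is free to act as a flag
	 */
	return (((unsigned long)&e->fixup + e->fixup) & 4) != 0;
}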
...@@ -33,14 +33,13 @@ ...@@ -33,14 +33,13 @@
*/ */
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h> #include <linux/page-flags.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/intrinsics.h> #include <asm/intrinsics.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/extable.h>
/* /*
* For historical reasons, the following macros are grossly misnamed: * For historical reasons, the following macros are grossly misnamed:
...@@ -48,9 +47,6 @@ ...@@ -48,9 +47,6 @@
#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */ #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit) #define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x)) #define set_fs(x) (current_thread_info()->addr_limit = (x))
...@@ -63,14 +59,14 @@ ...@@ -63,14 +59,14 @@
* address TASK_SIZE is never valid. We also need to make sure that the address doesn't * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table. * point inside the virtually mapped linear page table.
*/ */
#define __access_ok(addr, size, segment) \ static inline int __access_ok(const void __user *p, unsigned long size)
({ \ {
__chk_user_ptr(addr); \ unsigned long addr = (unsigned long)p;
(likely((unsigned long) (addr) <= (segment).seg) \ unsigned long seg = get_fs().seg;
&& ((segment).seg == KERNEL_DS.seg \ return likely(addr <= seg) &&
|| likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
}) }
#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) #define access_ok(type, addr, size) __access_ok((addr), (size))
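The rewritten __access_ok() keeps the behaviour described in the comment: USER_DS.seg is TASK_SIZE - 1, so TASK_SIZE itself can never pass the addr <= seg test, while KERNEL_DS.seg is ~0UL and lets every address through. With hypothetical numbers:

/* Hypothetical values, only to exercise the comparison above.
 * Assumes a 64-bit unsigned long.
 */
#include <assert.h>

int main(void)
{
	unsigned long task_size  = 0x8000000000000000UL;	/* made up */
	unsigned long user_seg   = task_size - 1;		/* USER_DS.seg   */
	unsigned long kernel_seg = ~0UL;			/* KERNEL_DS.seg */

	assert(task_size - 1 <= user_seg);	/* last user byte passes  */
	assert(!(task_size <= user_seg));	/* TASK_SIZE itself fails */
	assert(task_size <= kernel_seg);	/* KERNEL_DS accepts it   */
	return 0;
}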
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -80,8 +76,8 @@ ...@@ -80,8 +76,8 @@
* (a) re-use the arguments for side effects (sizeof/typeof is ok) * (a) re-use the arguments for side effects (sizeof/typeof is ok)
* (b) require any knowledge of processes at this stage * (b) require any knowledge of processes at this stage
*/ */
#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs()) #define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
/* /*
* The "__xxx" versions do not do address space checking, useful when * The "__xxx" versions do not do address space checking, useful when
...@@ -184,13 +180,13 @@ extern void __get_user_unknown (void); ...@@ -184,13 +180,13 @@ extern void __get_user_unknown (void);
* could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while
* using r8/r9. * using r8/r9.
*/ */
#define __do_get_user(check, x, ptr, size, segment) \ #define __do_get_user(check, x, ptr, size) \
({ \ ({ \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
__typeof__ (size) __gu_size = (size); \ __typeof__ (size) __gu_size = (size); \
long __gu_err = -EFAULT; \ long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \ unsigned long __gu_val = 0; \
if (!check || __access_ok(__gu_ptr, size, segment)) \ if (!check || __access_ok(__gu_ptr, size)) \
switch (__gu_size) { \ switch (__gu_size) { \
case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \ case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
...@@ -202,8 +198,8 @@ extern void __get_user_unknown (void); ...@@ -202,8 +198,8 @@ extern void __get_user_unknown (void);
__gu_err; \ __gu_err; \
}) })
#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS) #define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size)
#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment) #define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size)
extern void __put_user_unknown (void); extern void __put_user_unknown (void);
...@@ -211,14 +207,14 @@ extern void __put_user_unknown (void); ...@@ -211,14 +207,14 @@ extern void __put_user_unknown (void);
* Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
* could clobber r8 (among others). Thus, be careful not to evaluate them while using r8. * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
*/ */
#define __do_put_user(check, x, ptr, size, segment) \ #define __do_put_user(check, x, ptr, size) \
({ \ ({ \
__typeof__ (x) __pu_x = (x); \ __typeof__ (x) __pu_x = (x); \
__typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \ __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
__typeof__ (size) __pu_size = (size); \ __typeof__ (size) __pu_size = (size); \
long __pu_err = -EFAULT; \ long __pu_err = -EFAULT; \
\ \
if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \ if (!check || __access_ok(__pu_ptr, __pu_size)) \
switch (__pu_size) { \ switch (__pu_size) { \
case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \ case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \ case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
...@@ -229,8 +225,8 @@ extern void __put_user_unknown (void); ...@@ -229,8 +225,8 @@ extern void __put_user_unknown (void);
__pu_err; \ __pu_err; \
}) })
#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS) #define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size)
#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment) #define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size)
/* /*
* Complex access routines * Complex access routines
...@@ -239,56 +235,19 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use ...@@ -239,56 +235,19 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
unsigned long count); unsigned long count);
static inline unsigned long static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count) raw_copy_to_user(void __user *to, const void *from, unsigned long count)
{ {
check_object_size(from, count, true);
return __copy_user(to, (__force void __user *) from, count); return __copy_user(to, (__force void __user *) from, count);
} }
static inline unsigned long static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count) raw_copy_from_user(void *to, const void __user *from, unsigned long count)
{ {
check_object_size(to, count, false);
return __copy_user((__force void __user *) to, from, count); return __copy_user((__force void __user *) to, from, count);
} }
#define __copy_to_user_inatomic __copy_to_user #define INLINE_COPY_FROM_USER
#define __copy_from_user_inatomic __copy_from_user #define INLINE_COPY_TO_USER
#define copy_to_user(to, from, n) \
({ \
void __user *__cu_to = (to); \
const void *__cu_from = (from); \
long __cu_len = (n); \
\
if (__access_ok(__cu_to, __cu_len, get_fs())) { \
check_object_size(__cu_from, __cu_len, true); \
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
} \
__cu_len; \
})
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
check_object_size(to, n, false);
if (likely(__access_ok(from, n, get_fs())))
n = __copy_user((__force void __user *) to, from, n);
else
memset(to, 0, n);
return n;
}
#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
static inline unsigned long
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
n = __copy_user(to, from, n);
return n;
}
extern unsigned long __do_clear_user (void __user *, unsigned long); extern unsigned long __do_clear_user (void __user *, unsigned long);
...@@ -297,7 +256,7 @@ extern unsigned long __do_clear_user (void __user *, unsigned long); ...@@ -297,7 +256,7 @@ extern unsigned long __do_clear_user (void __user *, unsigned long);
#define clear_user(to, n) \ #define clear_user(to, n) \
({ \ ({ \
unsigned long __cu_len = (n); \ unsigned long __cu_len = (n); \
if (__access_ok(to, __cu_len, get_fs())) \ if (__access_ok(to, __cu_len)) \
__cu_len = __do_clear_user(to, __cu_len); \ __cu_len = __do_clear_user(to, __cu_len); \
__cu_len; \ __cu_len; \
}) })
...@@ -313,7 +272,7 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from, ...@@ -313,7 +272,7 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
({ \ ({ \
const char __user * __sfu_from = (from); \ const char __user * __sfu_from = (from); \
long __sfu_ret = -EFAULT; \ long __sfu_ret = -EFAULT; \
if (__access_ok(__sfu_from, 0, get_fs())) \ if (__access_ok(__sfu_from, 0)) \
__sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \ __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
__sfu_ret; \ __sfu_ret; \
}) })
...@@ -325,7 +284,7 @@ extern unsigned long __strlen_user (const char __user *); ...@@ -325,7 +284,7 @@ extern unsigned long __strlen_user (const char __user *);
({ \ ({ \
const char __user *__su_str = (str); \ const char __user *__su_str = (str); \
unsigned long __su_ret = 0; \ unsigned long __su_ret = 0; \
if (__access_ok(__su_str, 0, get_fs())) \ if (__access_ok(__su_str, 0)) \
__su_ret = __strlen_user(__su_str); \ __su_ret = __strlen_user(__su_str); \
__su_ret; \ __su_ret; \
}) })
...@@ -341,18 +300,11 @@ extern unsigned long __strnlen_user (const char __user *, long); ...@@ -341,18 +300,11 @@ extern unsigned long __strnlen_user (const char __user *, long);
({ \ ({ \
const char __user *__su_str = (str); \ const char __user *__su_str = (str); \
unsigned long __su_ret = 0; \ unsigned long __su_ret = 0; \
if (__access_ok(__su_str, 0, get_fs())) \ if (__access_ok(__su_str, 0)) \
__su_ret = __strnlen_user(__su_str, len); \ __su_ret = __strnlen_user(__su_str, len); \
__su_ret; \ __su_ret; \
}) })
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
int insn; /* location-relative address of insn this fixup is for */
int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};
#define ARCH_HAS_TRANSLATE_MEM_PTR 1 #define ARCH_HAS_TRANSLATE_MEM_PTR 1
static __inline__ void * static __inline__ void *
xlate_dev_mem_ptr(phys_addr_t p) xlate_dev_mem_ptr(phys_addr_t p)
......
...@@ -556,9 +556,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ ...@@ -556,9 +556,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
#define D r22 #define D r22
#define F r28 #define F r28
#define memset_arg0 r32
#define memset_arg2 r33
#define saved_retval loc0 #define saved_retval loc0
#define saved_rtlink loc1 #define saved_rtlink loc1
#define saved_pfs_stack loc2 #define saved_pfs_stack loc2
...@@ -622,7 +619,7 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ ...@@ -622,7 +619,7 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
* (faulting_addr - orig_dst) -> len to faulting st address * (faulting_addr - orig_dst) -> len to faulting st address
* B = (cur_dst - orig_dst) -> len copied so far * B = (cur_dst - orig_dst) -> len copied so far
* C = A - B -> len need to be copied * C = A - B -> len need to be copied
* D = orig_len - A -> len need to be zeroed * D = orig_len - A -> len need to be left along
*/ */
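The reworded comment reflects the new contract: the D trailing bytes are no longer zeroed in the fault path, they are simply left alone and counted in the value returned to the generic copy_from_user() wrapper. Plugging hypothetical numbers into the bookkeeping above:

/* Made-up numbers for a faulting 100-byte copy: the store at offset 64
 * faults after 56 bytes had already been written out.
 */
#include <stdio.h>

int main(void)
{
	unsigned long orig_len = 100;
	unsigned long A = 64;		/* faulting address - original dst   */
	unsigned long B = 56;		/* bytes already stored              */
	unsigned long C = A - B;	/* 8 bytes the fixup still copies    */
	unsigned long D = orig_len - A;	/* 36 bytes left alone, contributing */
					/* to the "not copied" return value  */

	printf("C=%lu D=%lu\n", C, D);
	return 0;
}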
(p6) sub A = F, saved_in0 (p6) sub A = F, saved_in0
(p7) sub A = F, saved_in1 (p7) sub A = F, saved_in1
...@@ -638,9 +635,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ ...@@ -638,9 +635,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
sub D = saved_in2, A sub D = saved_in2, A
;; ;;
cmp.gt p8,p0=C,r0 // more than 1 byte? cmp.gt p8,p0=C,r0 // more than 1 byte?
add memset_arg0=saved_in0, A
(p6) mov memset_arg2=0 // copy_to_user should not call memset
(p7) mov memset_arg2=D // copy_from_user need to have kbuf zeroed
mov r8=0 mov r8=0
mov saved_retval = D mov saved_retval = D
mov saved_rtlink = b0 mov saved_rtlink = b0
...@@ -652,11 +646,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ ...@@ -652,11 +646,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
;; ;;
add saved_retval=saved_retval,r8 // above might return non-zero value add saved_retval=saved_retval,r8 // above might return non-zero value
cmp.gt p8,p0=memset_arg2,r0 // more than 1 byte?
mov out0=memset_arg0 // *s
mov out1=r0 // c
mov out2=memset_arg2 // n
(p8) br.call.sptk.few b0=memset
;; ;;
mov retval=saved_retval mov retval=saved_retval
......
...@@ -5,7 +5,10 @@ ...@@ -5,7 +5,10 @@
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
#include <linux/uaccess.h> #include <asm/ptrace.h>
#include <asm/extable.h>
#include <asm/errno.h>
#include <asm/processor.h>
void void
ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e) ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
generic-y += clkdev.h generic-y += clkdev.h
generic-y += current.h generic-y += current.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h generic-y += irq_work.h
generic-y += kvm_para.h generic-y += kvm_para.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
......
...@@ -11,13 +11,9 @@ ...@@ -11,13 +11,9 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <linux/prefetch.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
...@@ -114,25 +110,7 @@ static inline int access_ok(int type, const void *addr, unsigned long size) ...@@ -114,25 +110,7 @@ static inline int access_ok(int type, const void *addr, unsigned long size)
} }
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
/* #include <asm/extable.h>
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -483,174 +461,25 @@ do { \ ...@@ -483,174 +461,25 @@ do { \
: "r14", "memory"); \ : "r14", "memory"); \
} while (0) } while (0)
#define __copy_user_zeroing(to, from, size) \
do { \
unsigned long __dst, __src, __c; \
__asm__ __volatile__ ( \
" mv r14, %0\n" \
" or r14, %1\n" \
" beq %0, %1, 9f\n" \
" beqz %2, 9f\n" \
" and3 r14, r14, #3\n" \
" bnez r14, 2f\n" \
" and3 %2, %2, #3\n" \
" beqz %3, 2f\n" \
" addi %0, #-4 ; word_copy \n" \
" .fillinsn\n" \
"0: ld r14, @%1+\n" \
" addi %3, #-1\n" \
" .fillinsn\n" \
"1: st r14, @+%0\n" \
" bnez %3, 0b\n" \
" beqz %2, 9f\n" \
" addi %0, #4\n" \
" .fillinsn\n" \
"2: ldb r14, @%1 ; byte_copy \n" \
" .fillinsn\n" \
"3: stb r14, @%0\n" \
" addi %1, #1\n" \
" addi %2, #-1\n" \
" addi %0, #1\n" \
" bnez %2, 2b\n" \
" .fillinsn\n" \
"9:\n" \
".section .fixup,\"ax\"\n" \
" .balign 4\n" \
"5: addi %3, #1\n" \
" addi %1, #-4\n" \
" .fillinsn\n" \
"6: slli %3, #2\n" \
" add %2, %3\n" \
" addi %0, #4\n" \
" .fillinsn\n" \
"7: ldi r14, #0 ; store zero \n" \
" .fillinsn\n" \
"8: addi %2, #-1\n" \
" stb r14, @%0 ; ACE? \n" \
" addi %0, #1\n" \
" bnez %2, 8b\n" \
" seth r14, #high(9b)\n" \
" or3 r14, r14, #low(9b)\n" \
" jmp r14\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,6b\n" \
" .long 1b,5b\n" \
" .long 2b,7b\n" \
" .long 3b,7b\n" \
".previous\n" \
: "=&r" (__dst), "=&r" (__src), "=&r" (size), \
"=&r" (__c) \
: "0" (to), "1" (from), "2" (size), "3" (size / 4) \
: "r14", "memory"); \
} while (0)
/* We let the __ versions of copy_from/to_user inline, because they're often /* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead. * used in fast paths and have only a small space overhead.
*/ */
static inline unsigned long __generic_copy_from_user_nocheck(void *to, static inline unsigned long
const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
__copy_user_zeroing(to, from, n); prefetchw(to);
__copy_user(to, from, n);
return n; return n;
} }
static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, static inline unsigned long
const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
prefetch(from);
__copy_user(to, from, n); __copy_user(to, from, n);
return n; return n;
} }
unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
/**
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
#define __copy_to_user(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
#define copy_to_user(to, from, n) \
({ \
might_fault(); \
__generic_copy_to_user((to), (from), (n)); \
})
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
#define __copy_from_user(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
#define copy_from_user(to, from, n) \
({ \
might_fault(); \
__generic_copy_from_user((to), (from), (n)); \
})
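The kerneldoc removed above spelled out the caller-visible contract that survives the conversion: copy_to_user() and copy_from_user() return the number of bytes that could not be copied (zero on success), and on a partial copy_from_user() the destination tail is zero-padded; after this series the padding and the access_ok() check live in the generic wrappers rather than in per-arch code. A hypothetical caller relying on that contract (the struct and function name are invented for the example):

/* Hypothetical kernel-style caller; struct my_cfg and my_cfg_from_user()
 * are made up for illustration.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct my_cfg {
	unsigned int mode;
	unsigned int flags;
};

static int my_cfg_from_user(struct my_cfg *cfg, const void __user *ubuf,
			    size_t len)
{
	if (len != sizeof(*cfg))
		return -EINVAL;

	/* returns the number of bytes NOT copied; zero means success */
	if (copy_from_user(cfg, ubuf, len))
		return -EFAULT;

	return 0;
}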
long __must_check strncpy_from_user(char *dst, const char __user *src, long __must_check strncpy_from_user(char *dst, const char __user *src,
long count); long count);
long __must_check __strncpy_from_user(char *dst, long __must_check __strncpy_from_user(char *dst,
......
...@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user); ...@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__generic_copy_from_user);
EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(strnlen_user); EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -11,27 +11,6 @@ ...@@ -11,27 +11,6 @@
#include <linux/thread_info.h> #include <linux/thread_info.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
__copy_user(to,from,n);
return n;
}
unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to,from,n);
else
memset(to, 0, n);
return n;
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
......
...@@ -5,6 +5,7 @@ generic-y += device.h ...@@ -5,6 +5,7 @@ generic-y += device.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += futex.h generic-y += futex.h
generic-y += hw_irq.h generic-y += hw_irq.h
generic-y += ioctl.h generic-y += ioctl.h
......
...@@ -122,16 +122,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc, ...@@ -122,16 +122,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
wrusp(usp); wrusp(usp);
} }
#ifdef CONFIG_MMU
extern int handle_kernel_fault(struct pt_regs *regs);
#else
static inline int handle_kernel_fault(struct pt_regs *regs)
{
/* Any fault in kernel is fatal on non-mmu */
return 0;
}
#endif
/* Forward declaration, a strange C thing */ /* Forward declaration, a strange C thing */
struct task_struct; struct task_struct;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <asm/uaccess_mm.h> #include <asm/uaccess_mm.h>
#endif #endif
#include <asm/extable.h>
#ifdef CONFIG_CPU_HAS_NO_UNALIGNED #ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <asm-generic/uaccess-unaligned.h> #include <asm-generic/uaccess-unaligned.h>
#else #else
......
...@@ -5,14 +5,9 @@ ...@@ -5,14 +5,9 @@
* User space memory access functions * User space memory access functions
*/ */
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/sched.h>
#include <asm/segment.h> #include <asm/segment.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* We let the MMU do all checking */ /* We let the MMU do all checking */
static inline int access_ok(int type, const void __user *addr, static inline int access_ok(int type, const void __user *addr,
unsigned long size) unsigned long size)
...@@ -36,24 +31,6 @@ static inline int access_ok(int type, const void __user *addr, ...@@ -36,24 +31,6 @@ static inline int access_ok(int type, const void __user *addr,
#define MOVES "move" #define MOVES "move"
#endif #endif
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
extern int __put_user_bad(void); extern int __put_user_bad(void);
extern int __get_user_bad(void); extern int __get_user_bad(void);
...@@ -202,39 +179,55 @@ asm volatile ("\n" \ ...@@ -202,39 +179,55 @@ asm volatile ("\n" \
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ #define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l
#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
asm volatile ("\n" \ asm volatile ("\n" \
"1: "MOVES"."#s1" (%2)+,%3\n" \ "1: "MOVES"."#s1" (%2)+,%3\n" \
" move."#s1" %3,(%1)+\n" \ " move."#s1" %3,(%1)+\n" \
" .ifnc \""#s2"\",\"\"\n" \
"2: "MOVES"."#s2" (%2)+,%3\n" \ "2: "MOVES"."#s2" (%2)+,%3\n" \
" move."#s2" %3,(%1)+\n" \ " move."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \ " .ifnc \""#s3"\",\"\"\n" \
"3: "MOVES"."#s3" (%2)+,%3\n" \ "3: "MOVES"."#s3" (%2)+,%3\n" \
" move."#s3" %3,(%1)+\n" \ " move."#s3" %3,(%1)+\n" \
" .endif\n" \ " .endif\n" \
" .endif\n" \
"4:\n" \ "4:\n" \
" .section __ex_table,\"a\"\n" \ " .section __ex_table,\"a\"\n" \
" .align 4\n" \ " .align 4\n" \
" .long 1b,10f\n" \ " .long 1b,10f\n" \
" .ifnc \""#s2"\",\"\"\n" \
" .long 2b,20f\n" \ " .long 2b,20f\n" \
" .ifnc \""#s3"\",\"\"\n" \ " .ifnc \""#s3"\",\"\"\n" \
" .long 3b,30f\n" \ " .long 3b,30f\n" \
" .endif\n" \ " .endif\n" \
" .endif\n" \
" .previous\n" \ " .previous\n" \
"\n" \ "\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
"10: clr."#s1" (%1)+\n" \ "10: addq.l #"#n1",%0\n" \
"20: clr."#s2" (%1)+\n" \ " .ifnc \""#s2"\",\"\"\n" \
"20: addq.l #"#n2",%0\n" \
" .ifnc \""#s3"\",\"\"\n" \ " .ifnc \""#s3"\",\"\"\n" \
"30: clr."#s3" (%1)+\n" \ "30: addq.l #"#n3",%0\n" \
" .endif\n" \
" .endif\n" \ " .endif\n" \
" moveq.l #"#n",%0\n" \
" jra 4b\n" \ " jra 4b\n" \
" .previous\n" \ " .previous\n" \
: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
: : "memory") : : "memory")
#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \
___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \
__suffix##n1, __suffix##n2, __suffix##n3)
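The new macro layer picks the m68k move suffix from the constant byte count by token pasting (__suffix4 expands to l, __suffix2 to w, __suffix1 to b), and the fixup stubs now just add the sizes of the moves that did not complete to the returned byte count instead of clearing the destination. A stand-alone illustration of the same token-pasting trick, separate from the kernel macros:

/* Illustrative only: maps constant sizes to m68k move suffixes the same
 * way the macros above do, then prints them.
 */
#include <stdio.h>

#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l

#define STRINGIFY_(x)	#x
#define STRINGIFY(x)	STRINGIFY_(x)
#define SUFFIX(n)	STRINGIFY(__suffix##n)

int main(void)
{
	/* a 7-byte constant copy is split as 4 + 2 + 1, giving "l w b" */
	printf("%s %s %s\n", SUFFIX(4), SUFFIX(2), SUFFIX(1));
	/* unused tail slots use size 0, which expands to an empty string */
	printf("[%s]\n", SUFFIX(0));
	return 0;
}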
static __always_inline unsigned long static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n) __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
...@@ -242,37 +235,37 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) ...@@ -242,37 +235,37 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
switch (n) { switch (n) {
case 1: case 1:
__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
break; break;
case 2: case 2:
__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2); __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
break; break;
case 3: case 3:
__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
break; break;
case 4: case 4:
__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4); __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
break; break;
case 5: case 5:
__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
break; break;
case 6: case 6:
__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
break; break;
case 7: case 7:
__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
break; break;
case 8: case 8:
__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
break; break;
case 9: case 9:
__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
break; break;
case 10: case 10:
__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w); __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
break; break;
case 12: case 12:
__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l); __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
break; break;
default: default:
/* we limit the inlined version to 3 moves */ /* we limit the inlined version to 3 moves */
...@@ -363,24 +356,26 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) ...@@ -363,24 +356,26 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
return res; return res;
} }
#define __copy_from_user(to, from, n) \ static inline unsigned long
(__builtin_constant_p(n) ? \ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
__constant_copy_from_user(to, from, n) : \ {
__generic_copy_from_user(to, from, n)) if (__builtin_constant_p(n))
return __constant_copy_from_user(to, from, n);
#define __copy_to_user(to, from, n) \ return __generic_copy_from_user(to, from, n);
(__builtin_constant_p(n) ? \ }
__constant_copy_to_user(to, from, n) : \
__generic_copy_to_user(to, from, n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_from_user(to, from, n) __copy_from_user(to, from, n) static inline unsigned long
#define copy_to_user(to, from, n) __copy_to_user(to, from, n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n))
return __constant_copy_to_user(to, from, n);
return __generic_copy_to_user(to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#define user_addr_max() \ #define user_addr_max() \
(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) (uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dst, const char __user *src, long count); extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str); extern __must_check long strlen_user(const char __user *str);
......
...@@ -4,15 +4,11 @@ ...@@ -4,15 +4,11 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/segment.h> #include <asm/segment.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size))
/* /*
...@@ -26,25 +22,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size) ...@@ -26,25 +22,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
return 1; return 1;
} }
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type. * use the right size if we just have the right pointer type.
...@@ -124,13 +101,21 @@ extern int __get_user_bad(void); ...@@ -124,13 +101,21 @@ extern int __get_user_bad(void);
: "=d" (x) \ : "=d" (x) \
: "m" (*__ptr(ptr))) : "m" (*__ptr(ptr)))
#define copy_from_user(to, from, n) (memcpy(to, from, n), 0) static inline unsigned long
#define copy_to_user(to, from, n) (memcpy(to, from, n), 0) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
memcpy(to, (__force const void *)from, n);
return 0;
}
#define __copy_from_user(to, from, n) copy_from_user(to, from, n) static inline unsigned long
#define __copy_to_user(to, from, n) copy_to_user(to, from, n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define __copy_to_user_inatomic __copy_to_user {
#define __copy_from_user_inatomic __copy_from_user memcpy((__force void *)to, from, n);
return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
......
...@@ -88,7 +88,7 @@ static inline int frame_extra_sizes(int f) ...@@ -88,7 +88,7 @@ static inline int frame_extra_sizes(int f)
return frame_size_change[f]; return frame_size_change[f];
} }
int handle_kernel_fault(struct pt_regs *regs) int fixup_exception(struct pt_regs *regs)
{ {
const struct exception_table_entry *fixup; const struct exception_table_entry *fixup;
struct pt_regs *tregs; struct pt_regs *tregs;
......
...@@ -1016,7 +1016,12 @@ asmlinkage void trap_c(struct frame *fp) ...@@ -1016,7 +1016,12 @@ asmlinkage void trap_c(struct frame *fp)
/* traced a trapping instruction on a 68020/30, /* traced a trapping instruction on a 68020/30,
* real exception will be executed afterwards. * real exception will be executed afterwards.
*/ */
} else if (!handle_kernel_fault(&fp->ptregs)) return;
}
#ifdef CONFIG_MMU
if (fixup_exception(&fp->ptregs))
return;
#endif
bad_super_trap(fp); bad_super_trap(fp);
return; return;
} }
......
...@@ -30,19 +30,13 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from, ...@@ -30,19 +30,13 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
"6:\n" "6:\n"
" .section .fixup,\"ax\"\n" " .section .fixup,\"ax\"\n"
" .even\n" " .even\n"
"10: move.l %0,%3\n" "10: lsl.l #2,%0\n"
"7: clr.l (%2)+\n"
" subq.l #1,%3\n"
" jne 7b\n"
" lsl.l #2,%0\n"
" btst #1,%5\n" " btst #1,%5\n"
" jeq 8f\n" " jeq 8f\n"
"30: clr.w (%2)+\n" "30: addq.l #2,%0\n"
" addq.l #2,%0\n"
"8: btst #0,%5\n" "8: btst #0,%5\n"
" jeq 6b\n" " jeq 6b\n"
"50: clr.b (%2)+\n" "50: addq.l #1,%0\n"
" addq.l #1,%0\n"
" jra 6b\n" " jra 6b\n"
" .previous\n" " .previous\n"
"\n" "\n"
......
...@@ -32,7 +32,7 @@ int send_fault_sig(struct pt_regs *regs) ...@@ -32,7 +32,7 @@ int send_fault_sig(struct pt_regs *regs)
force_sig_info(siginfo.si_signo, force_sig_info(siginfo.si_signo,
&siginfo, current); &siginfo, current);
} else { } else {
if (handle_kernel_fault(regs)) if (fixup_exception(regs))
return -1; return -1;
//if (siginfo.si_signo == SIGBUS) //if (siginfo.si_signo == SIGBUS)
......
...@@ -8,6 +8,7 @@ generic-y += dma.h ...@@ -8,6 +8,7 @@ generic-y += dma.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += futex.h generic-y += futex.h
......
...@@ -4,10 +4,6 @@ ...@@ -4,10 +4,6 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
...@@ -28,7 +24,7 @@ ...@@ -28,7 +24,7 @@
#define segment_eq(a, b) ((a).seg == (b).seg) #define segment_eq(a, b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (uaccess_kernel())
/* /*
* Explicitly allow NULL pointers here. Parts of the kernel such * Explicitly allow NULL pointers here. Parts of the kernel such
* as readv/writev use access_ok to validate pointers, but want * as readv/writev use access_ok to validate pointers, but want
...@@ -51,28 +47,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size) ...@@ -51,28 +47,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
(unsigned long)(size)) (unsigned long)(size))
static inline int verify_area(int type, const void *addr, unsigned long size) #include <asm/extable.h>
{
return access_ok(type, addr, size) ? 0 : -EFAULT;
}
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
...@@ -199,36 +174,9 @@ extern long __must_check strnlen_user(const char __user *src, long count); ...@@ -199,36 +174,9 @@ extern long __must_check strnlen_user(const char __user *src, long count);
extern unsigned long raw_copy_from_user(void *to, const void __user *from, extern unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n); unsigned long n);
extern unsigned long raw_copy_to_user(void __user *to, const void *from,
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = raw_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __must_check __copy_user(void __user *to,
const void *from,
unsigned long n); unsigned long n);
static inline unsigned long copy_to_user(void __user *to, const void *from,
unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
return __copy_user(to, from, n);
return n;
}
#define __copy_to_user(to, from, n) __copy_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
/* /*
* Zero Userspace * Zero Userspace
*/ */
......
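This is the pattern repeated for every architecture in the pile: the checked copy_to_user()/copy_from_user() definitions leave the arch header and only raw_copy_{to,from}_user() remain. The raw routines return the number of bytes they could not copy and do no access_ok(), might_fault() or zero-padding of their own; all of that now lives in shared code. Callers are untouched, as in the following hypothetical driver-style snippet (my_cfg and my_set_cfg are invented names, shown only to illustrate that the external API is unchanged):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_cfg {			/* invented structure, for illustration only */
	u32 flags;
	u32 timeout_ms;
};

static long my_set_cfg(struct my_cfg *dst, const void __user *arg)
{
	/* copy_from_user() now comes from generic code on every architecture
	 * and still returns the number of bytes it failed to copy. */
	if (copy_from_user(dst, arg, sizeof(*dst)))
		return -EFAULT;
	return 0;
}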
...@@ -548,7 +548,7 @@ ...@@ -548,7 +548,7 @@
"SUB %1, %1, D0Ar2\n" \ "SUB %1, %1, D0Ar2\n" \
"SUB %3, %3, D1Ar1\n") "SUB %3, %3, D1Ar1\n")
unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
unsigned long n) unsigned long n)
{ {
register char __user *dst asm ("A0.2") = pdst; register char __user *dst asm ("A0.2") = pdst;
...@@ -654,7 +654,7 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, ...@@ -654,7 +654,7 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
*/ */
return retn; return retn;
} }
EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(raw_copy_to_user);
#define __asm_copy_from_user_1(to, from, ret) \ #define __asm_copy_from_user_1(to, from, ret) \
__asm_copy_user_cont(to, from, ret, \ __asm_copy_user_cont(to, from, ret, \
......
...@@ -3,6 +3,7 @@ generic-y += barrier.h ...@@ -3,6 +3,7 @@ generic-y += barrier.h
generic-y += clkdev.h generic-y += clkdev.h
generic-y += device.h generic-y += device.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h generic-y += irq_work.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
......
...@@ -11,22 +11,15 @@ ...@@ -11,22 +11,15 @@
#ifndef _ASM_MICROBLAZE_UACCESS_H #ifndef _ASM_MICROBLAZE_UACCESS_H
#define _ASM_MICROBLAZE_UACCESS_H #define _ASM_MICROBLAZE_UACCESS_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h> /* RLIMIT_FSIZE */
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/extable.h>
#include <linux/string.h> #include <linux/string.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* On Microblaze the fs value is actually the top of the corresponding * On Microblaze the fs value is actually the top of the corresponding
* address space. * address space.
...@@ -55,22 +48,6 @@ ...@@ -55,22 +48,6 @@
# define segment_eq(a, b) ((a).seg == (b).seg) # define segment_eq(a, b) ((a).seg == (b).seg)
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
#ifndef CONFIG_MMU #ifndef CONFIG_MMU
/* Check against bounds of physical memory */ /* Check against bounds of physical memory */
...@@ -359,39 +336,19 @@ extern long __user_bad(void); ...@@ -359,39 +336,19 @@ extern long __user_bad(void);
__gu_err; \ __gu_err; \
}) })
static inline unsigned long
/* copy_to_from_user */ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
#define __copy_from_user(to, from, n) \
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n))
#define __copy_from_user_inatomic(to, from, n) \
__copy_from_user((to), (from), (n))
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{ {
unsigned long res = n; return __copy_tofrom_user((__force void __user *)to, from, n);
might_fault();
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
} }
#define __copy_to_user(to, from, n) \ static inline unsigned long
__copy_tofrom_user((void __user *)(to), \ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
(__force const void __user *)(from), (n))
#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{ {
might_fault(); return __copy_tofrom_user(to, (__force const void __user *)from, n);
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
return n;
} }
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
...@@ -422,7 +379,4 @@ static inline long strnlen_user(const char __user *src, long n) ...@@ -422,7 +379,4 @@ static inline long strnlen_user(const char __user *src, long n)
return __strnlen_user(src, n); return __strnlen_user(src, n);
} }
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_UACCESS_H */ #endif /* _ASM_MICROBLAZE_UACCESS_H */
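Microblaze builds both directions on top of its existing __copy_tofrom_user() primitive, with __force casts to keep sparse quiet about the address-space conversion, and defines INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER. Those markers tell the generic uaccess header to emit the checked wrappers inline in each caller instead of using the single out-of-line copy in lib/usercopy.c; roughly (a paraphrase of the mechanism, not the exact generic code):

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_to_user(to, from, n);
	return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);	/* built once in lib/usercopy.c */
#endif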
...@@ -68,7 +68,6 @@ config MIPS ...@@ -68,7 +68,6 @@ config MIPS
select HANDLE_DOMAIN_IRQ select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_ARCH_HARDENED_USERCOPY
menu "Machine selection" menu "Machine selection"
......
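The select HAVE_ARCH_HARDENED_USERCOPY lines can go because hardened usercopy is now assumed for every architecture: the object-size check sits once in the shared wrappers instead of being open-coded in each arch's copy macros (the check_object_size() calls removed from the MIPS uaccess.h hunks further down show what used to be duplicated). Approximately, and simplified from the real wrapper, which also carries compile-time size checks:

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	check_object_size(from, n, true);	/* hardened usercopy, done once here */
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		n = raw_copy_to_user(to, from, n);
	return n;
}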
...@@ -139,15 +139,6 @@ ...@@ -139,15 +139,6 @@
.set noreorder .set noreorder
.set noat .set noat
/*
* t7 is used as a flag to note inatomic mode.
*/
LEAF(__copy_user_inatomic)
EXPORT_SYMBOL(__copy_user_inatomic)
b __copy_user_common
li t7, 1
END(__copy_user_inatomic)
/* /*
* A combined memcpy/__copy_user * A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of * __copy_user sets len to 0 for success; else to an upper bound of
...@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy) ...@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
__memcpy: __memcpy:
FEXPORT(__copy_user) FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user) EXPORT_SYMBOL(__copy_user)
li t7, 0 /* not inatomic */
__copy_user_common:
/* /*
* Note: dst & src may be unaligned, len may be 0 * Note: dst & src may be unaligned, len may be 0
* Temps * Temps
...@@ -414,25 +403,7 @@ l_exc: ...@@ -414,25 +403,7 @@ l_exc:
LOAD t0, TI_TASK($28) LOAD t0, TI_TASK($28)
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
SUB len, AT, t0 # len number of uncopied bytes SUB len, AT, t0 # len number of uncopied bytes
bnez t7, 2f /* Skip the zeroing out part if inatomic */ jr ra
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
* dst += (fault addr - src) to put dst at first byte to clear
*/
ADD dst, t0 # compute start address in a1
SUB dst, src
/*
* Clear len bytes starting at dst. Can't call __bzero because it
* might modify len. An inefficient loop for these rare times...
*/
beqz len, done
SUB src, len, 1
1: sb zero, 0(dst)
ADD dst, dst, 1
bnez src, 1b
SUB src, src, 1
2: jr ra
nop nop
......
...@@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, ...@@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr) __wsum sum, int *err_ptr)
{ {
might_fault(); might_fault();
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
return __csum_partial_copy_kernel((__force void *)src, dst, return __csum_partial_copy_kernel((__force void *)src, dst,
len, sum, err_ptr); len, sum, err_ptr);
else else
...@@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, ...@@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
{ {
might_fault(); might_fault();
if (access_ok(VERIFY_WRITE, dst, len)) { if (access_ok(VERIFY_WRITE, dst, len)) {
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
return __csum_partial_copy_kernel(src, return __csum_partial_copy_kernel(src,
(__force void *)dst, (__force void *)dst,
len, sum, err_ptr); len, sum, err_ptr);
......
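The segment_eq(get_fs(), get_ds()) tests scattered through arch code are replaced by the new uaccess_kernel() helper, which reads better and keeps the addr_limit convention in one place. It is effectively just:

/* true when set_fs(KERNEL_DS) is in effect, i.e. "user" accesses actually
 * target kernel space */
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)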
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/cpu-type.h> #include <asm/cpu-type.h>
#include <asm/mipsmtregs.h> #include <asm/mipsmtregs.h>
#include <linux/uaccess.h> /* for segment_eq() */ #include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void); extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void); extern void (*r4k_blast_icache)(void);
...@@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ ...@@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
\ \
__##pfx##flush_prologue \ __##pfx##flush_prologue \
\ \
if (segment_eq(get_fs(), USER_DS)) { \ if (!uaccess_kernel()) { \
while (1) { \ while (1) { \
protected_cachee_op(hitop, addr); \ protected_cachee_op(hitop, addr); \
if (addr == aend) \ if (addr == aend) \
......
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
#define _ASM_UACCESS_H #define _ASM_UACCESS_H
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/asm-eva.h> #include <asm/asm-eva.h>
#include <asm/extable.h> #include <asm/extable.h>
...@@ -71,9 +69,6 @@ extern u64 __ua_limit; ...@@ -71,9 +69,6 @@ extern u64 __ua_limit;
#define USER_DS ((mm_segment_t) { __UA_LIMIT }) #define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif #endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit) #define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x)) #define set_fs(x) (current_thread_info()->addr_limit = (x))
...@@ -93,7 +88,7 @@ static inline bool eva_kernel_access(void) ...@@ -93,7 +88,7 @@ static inline bool eva_kernel_access(void)
if (!IS_ENABLED(CONFIG_EVA)) if (!IS_ENABLED(CONFIG_EVA))
return false; return false;
return segment_eq(get_fs(), get_ds()); return uaccess_kernel();
} }
/* /*
...@@ -133,23 +128,14 @@ static inline bool eva_kernel_access(void) ...@@ -133,23 +128,14 @@ static inline bool eva_kernel_access(void)
* this function, memory access functions may still return -EFAULT. * this function, memory access functions may still return -EFAULT.
*/ */
#define __access_mask get_fs().seg static inline int __access_ok(const void __user *p, unsigned long size)
{
#define __access_ok(addr, size, mask) \ unsigned long addr = (unsigned long)p;
({ \ return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
unsigned long __addr = (unsigned long) (addr); \ }
unsigned long __size = size; \
unsigned long __mask = mask; \
unsigned long __ok; \
\
__chk_user_ptr(addr); \
__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
__ua_size(__size))); \
__ok == 0; \
})
#define access_ok(type, addr, size) \ #define access_ok(type, addr, size) \
likely(__access_ok((addr), (size), __access_mask)) likely(__access_ok((addr), (size)))
/* /*
* put_user: - Write a simple value into user space. * put_user: - Write a simple value into user space.
...@@ -811,157 +797,7 @@ extern void __put_user_unaligned_unknown(void); ...@@ -811,157 +797,7 @@ extern void __put_user_unaligned_unknown(void);
extern size_t __copy_user(void *__to, const void *__from, size_t __n); extern size_t __copy_user(void *__to, const void *__from, size_t __n);
#ifndef CONFIG_EVA #define __invoke_copy_from(func, to, from, n) \
#define __invoke_copy_to_user(to, from, n) \
({ \
register void __user *__cu_to_r __asm__("$4"); \
register const void *__cu_from_r __asm__("$5"); \
register long __cu_len_r __asm__("$6"); \
\
__cu_to_r = (to); \
__cu_from_r = (from); \
__cu_len_r = (n); \
__asm__ __volatile__( \
__MODULE_JAL(__copy_user) \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
DADDI_SCRATCH, "memory"); \
__cu_len_r; \
})
#define __invoke_copy_to_kernel(to, from, n) \
__invoke_copy_to_user(to, from, n)
#endif
/*
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
#define __copy_to_user(to, from, n) \
({ \
void __user *__cu_to; \
const void *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
\
check_object_size(__cu_from, __cu_len, true); \
might_fault(); \
\
if (eva_kernel_access()) \
__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
__cu_len); \
else \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
})
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
#define __copy_to_user_inatomic(to, from, n) \
({ \
void __user *__cu_to; \
const void *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
\
check_object_size(__cu_from, __cu_len, true); \
\
if (eva_kernel_access()) \
__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
__cu_len); \
else \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
})
#define __copy_from_user_inatomic(to, from, n) \
({ \
void *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
\
check_object_size(__cu_to, __cu_len, false); \
\
if (eva_kernel_access()) \
__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
__cu_from,\
__cu_len);\
else \
__cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
__cu_from, \
__cu_len); \
__cu_len; \
})
/*
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
#define copy_to_user(to, from, n) \
({ \
void __user *__cu_to; \
const void *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
\
check_object_size(__cu_from, __cu_len, true); \
\
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_to_kernel(__cu_to, \
__cu_from, \
__cu_len); \
} else { \
if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, \
__cu_from, \
__cu_len); \
} \
} \
__cu_len; \
})
#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n) \
({ \ ({ \
register void *__cu_to_r __asm__("$4"); \ register void *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \ register const void __user *__cu_from_r __asm__("$5"); \
...@@ -972,7 +808,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); ...@@ -972,7 +808,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r = (n); \ __cu_len_r = (n); \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tnoreorder\n\t" \ ".set\tnoreorder\n\t" \
__MODULE_JAL(__copy_user) \ __MODULE_JAL(func) \
".set\tnoat\n\t" \ ".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \ __UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \ ".set\tat\n\t" \
...@@ -984,33 +820,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); ...@@ -984,33 +820,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \ __cu_len_r; \
}) })
#define __invoke_copy_from_kernel(to, from, n) \ #define __invoke_copy_to(func, to, from, n) \
__invoke_copy_from_user(to, from, n)
/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n) \
__invoke_copy_from_user(to, from, n)
/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n) \
__invoke_copy_from_user(to, from, n)
#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \ ({ \
register void *__cu_to_r __asm__("$4"); \ register void __user *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \ register const void *__cu_from_r __asm__("$5"); \
register long __cu_len_r __asm__("$6"); \ register long __cu_len_r __asm__("$6"); \
\ \
__cu_to_r = (to); \ __cu_to_r = (to); \
__cu_from_r = (from); \ __cu_from_r = (from); \
__cu_len_r = (n); \ __cu_len_r = (n); \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tnoreorder\n\t" \ __MODULE_JAL(func) \
__MODULE_JAL(__copy_user_inatomic) \
".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \
".set\treorder" \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \ : \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
...@@ -1018,228 +838,79 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); ...@@ -1018,228 +838,79 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \ __cu_len_r; \
}) })
#define __invoke_copy_from_kernel_inatomic(to, from, n) \ #define __invoke_copy_from_kernel(to, from, n) \
__invoke_copy_from_user_inatomic(to, from, n) \ __invoke_copy_from(__copy_user, to, from, n)
#define __invoke_copy_to_kernel(to, from, n) \
__invoke_copy_to(__copy_user, to, from, n)
#define ___invoke_copy_in_kernel(to, from, n) \
__invoke_copy_from(__copy_user, to, from, n)
#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n) \
__invoke_copy_from(__copy_user, to, from, n)
#define __invoke_copy_to_user(to, from, n) \
__invoke_copy_to(__copy_user, to, from, n)
#define ___invoke_copy_in_user(to, from, n) \
__invoke_copy_from(__copy_user, to, from, n)
#else #else
/* EVA specific functions */ /* EVA specific functions */
extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from, extern size_t __copy_from_user_eva(void *__to, const void *__from,
size_t __n); size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from, extern size_t __copy_to_user_eva(void *__to, const void *__from,
size_t __n); size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
({ \
register void *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \
register long __cu_len_r __asm__("$6"); \
\
__cu_to_r = (to); \
__cu_from_r = (from); \
__cu_len_r = (n); \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
__MODULE_JAL(func_ptr) \
".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \
".set\treorder" \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
DADDI_SCRATCH, "memory"); \
__cu_len_r; \
})
#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
({ \
register void *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \
register long __cu_len_r __asm__("$6"); \
\
__cu_to_r = (to); \
__cu_from_r = (from); \
__cu_len_r = (n); \
__asm__ __volatile__( \
__MODULE_JAL(func_ptr) \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
DADDI_SCRATCH, "memory"); \
__cu_len_r; \
})
/* /*
* Source or destination address is in userland. We need to go through * Source or destination address is in userland. We need to go through
* the TLB * the TLB
*/ */
#define __invoke_copy_from_user(to, from, n) \ #define __invoke_copy_from_user(to, from, n) \
__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva) __invoke_copy_from(__copy_from_user_eva, to, from, n)
#define __invoke_copy_from_user_inatomic(to, from, n) \
__invoke_copy_from_user_eva_generic(to, from, n, \
__copy_user_inatomic_eva)
#define __invoke_copy_to_user(to, from, n) \ #define __invoke_copy_to_user(to, from, n) \
__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva) __invoke_copy_to(__copy_to_user_eva, to, from, n)
#define ___invoke_copy_in_user(to, from, n) \ #define ___invoke_copy_in_user(to, from, n) \
__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva) __invoke_copy_from(__copy_in_user_eva, to, from, n)
/*
* Source or destination address in the kernel. We are not going through
* the TLB
*/
#define __invoke_copy_from_kernel(to, from, n) \
__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
#define __invoke_copy_from_kernel_inatomic(to, from, n) \
__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
#define __invoke_copy_to_kernel(to, from, n) \
__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
#define ___invoke_copy_in_kernel(to, from, n) \
__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
#endif /* CONFIG_EVA */ #endif /* CONFIG_EVA */
/* static inline unsigned long
* __copy_from_user: - Copy a block of data from user space, with less checking. raw_copy_to_user(void __user *to, const void *from, unsigned long n)
* @to: Destination address, in kernel space. {
* @from: Source address, in user space. if (eva_kernel_access())
* @n: Number of bytes to copy. return __invoke_copy_to_kernel(to, from, n);
* else
* Context: User context only. This function may sleep if pagefaults are return __invoke_copy_to_user(to, from, n);
* enabled. }
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
#define __copy_from_user(to, from, n) \
({ \
void *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
\
check_object_size(__cu_to, __cu_len, false); \
\
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_from_kernel(__cu_to, \
__cu_from, \
__cu_len); \
} else { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
} \
__cu_len; \
})
/* static inline unsigned long
* copy_from_user: - Copy a block of data from user space. raw_copy_from_user(void *to, const void __user *from, unsigned long n)
* @to: Destination address, in kernel space. {
* @from: Source address, in user space. if (eva_kernel_access())
* @n: Number of bytes to copy. return __invoke_copy_from_kernel(to, from, n);
* else
* Context: User context only. This function may sleep if pagefaults are return __invoke_copy_from_user(to, from, n);
* enabled. }
*
* Copy data from user space to kernel space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
#define copy_from_user(to, from, n) \
({ \
void *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
\
check_object_size(__cu_to, __cu_len, false); \
\
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_from_kernel(__cu_to, \
__cu_from, \
__cu_len); \
} else { \
if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, \
__cu_from, \
__cu_len); \
} else { \
memset(__cu_to, 0, __cu_len); \
} \
} \
__cu_len; \
})
#define __copy_in_user(to, from, n) \ #define INLINE_COPY_FROM_USER
({ \ #define INLINE_COPY_TO_USER
void __user *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (eva_kernel_access()) { \
__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
__cu_len); \
} else { \
might_fault(); \
__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
__cu_len); \
} \
__cu_len; \
})
#define copy_in_user(to, from, n) \ static inline unsigned long
({ \ raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
void __user *__cu_to; \ {
const void __user *__cu_from; \ if (eva_kernel_access())
long __cu_len; \ return ___invoke_copy_in_kernel(to, from, n);
\ else
__cu_to = (to); \ return ___invoke_copy_in_user(to, from, n);
__cu_from = (from); \ }
__cu_len = (n); \
if (eva_kernel_access()) { \
__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
__cu_len); \
} else { \
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
might_fault(); \
__cu_len = ___invoke_copy_in_user(__cu_to, \
__cu_from, \
__cu_len); \
} \
} \
__cu_len; \
})
extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
......
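With the forest of per-direction macros collapsed into __invoke_copy_from() / __invoke_copy_to(), parameterized by the asm routine to jump to, MIPS only has to provide raw_copy_{from,to,in}_user() that pick the kernel, user or EVA variant. The access_ok() checks and might_fault() annotations that the removed copy_in_user() macro performed now happen in the generic wrapper, approximately:

static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}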
...@@ -1200,7 +1200,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1200,7 +1200,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
case lwl_op: case lwl_op:
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_READ, vaddr, 4)) { if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1273,7 +1273,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1273,7 +1273,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
case lwr_op: case lwr_op:
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_READ, vaddr, 4)) { if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1347,7 +1347,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1347,7 +1347,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
case swl_op: case swl_op:
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_WRITE, vaddr, 4)) { if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1417,7 +1417,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1417,7 +1417,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
case swr_op: case swr_op:
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_WRITE, vaddr, 4)) { if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1492,7 +1492,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1492,7 +1492,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_READ, vaddr, 8)) { if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1611,7 +1611,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1611,7 +1611,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_READ, vaddr, 8)) { if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1730,7 +1730,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1730,7 +1730,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_WRITE, vaddr, 8)) { if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1848,7 +1848,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1848,7 +1848,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
rt = regs->regs[MIPSInst_RT(inst)]; rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
if (!access_ok(VERIFY_WRITE, vaddr, 8)) { if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV; err = SIGSEGV;
break; break;
...@@ -1965,7 +1965,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -1965,7 +1965,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
err = SIGBUS; err = SIGBUS;
break; break;
} }
if (!access_ok(VERIFY_READ, vaddr, 4)) { if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGBUS; err = SIGBUS;
break; break;
...@@ -2021,7 +2021,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -2021,7 +2021,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
err = SIGBUS; err = SIGBUS;
break; break;
} }
if (!access_ok(VERIFY_WRITE, vaddr, 4)) { if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGBUS; err = SIGBUS;
break; break;
...@@ -2084,7 +2084,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -2084,7 +2084,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
err = SIGBUS; err = SIGBUS;
break; break;
} }
if (!access_ok(VERIFY_READ, vaddr, 8)) { if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGBUS; err = SIGBUS;
break; break;
...@@ -2145,7 +2145,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) ...@@ -2145,7 +2145,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
err = SIGBUS; err = SIGBUS;
break; break;
} }
if (!access_ok(VERIFY_WRITE, vaddr, 8)) { if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr; current->thread.cp0_baduaddr = vaddr;
err = SIGBUS; err = SIGBUS;
break; break;
......
...@@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) ...@@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
if (unlikely(addr & 3)) if (unlikely(addr & 3))
return -EINVAL; return -EINVAL;
if (unlikely(!access_ok(VERIFY_WRITE, addr, 4))) if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4)))
return -EINVAL; return -EINVAL;
if (cpu_has_llsc && R10000_LLSC_WAR) { if (cpu_has_llsc && R10000_LLSC_WAR) {
......
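These call-site casts follow from __access_ok() becoming a typed static inline that takes a const void __user * instead of a macro fed raw unsigned longs. Code that carries addresses around as integers, like the emulation and syscall paths above, now converts explicitly before the check. For illustration only (user_word_writable is an invented helper name):

#include <linux/uaccess.h>

static inline int user_word_writable(unsigned long vaddr)
{
	/* the integer address must be cast to a __user pointer for the
	 * strongly-typed access_ok()/__access_ok() */
	return access_ok(VERIFY_WRITE, (void __user *)vaddr, 4);
}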
...@@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, ...@@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigbus; goto sigbus;
if (IS_ENABLED(CONFIG_EVA)) { if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
LoadHW(addr, value, res); LoadHW(addr, value, res);
else else
LoadHWE(addr, value, res); LoadHWE(addr, value, res);
...@@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, ...@@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigbus; goto sigbus;
if (IS_ENABLED(CONFIG_EVA)) { if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
LoadW(addr, value, res); LoadW(addr, value, res);
else else
LoadWE(addr, value, res); LoadWE(addr, value, res);
...@@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, ...@@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigbus; goto sigbus;
if (IS_ENABLED(CONFIG_EVA)) { if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
LoadHWU(addr, value, res); LoadHWU(addr, value, res);
else else
LoadHWUE(addr, value, res); LoadHWUE(addr, value, res);
...@@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, ...@@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
value = regs->regs[insn.i_format.rt]; value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA)) { if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
StoreHW(addr, value, res); StoreHW(addr, value, res);
else else
StoreHWE(addr, value, res); StoreHWE(addr, value, res);
...@@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, ...@@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
value = regs->regs[insn.i_format.rt]; value = regs->regs[insn.i_format.rt];
if (IS_ENABLED(CONFIG_EVA)) { if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds())) if (uaccess_kernel())
StoreW(addr, value, res); StoreW(addr, value, res);
else else
StoreWE(addr, value, res); StoreWE(addr, value, res);
......
...@@ -562,39 +562,9 @@ ...@@ -562,39 +562,9 @@
LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop nop
SUB len, AT, t0 # len number of uncopied bytes SUB len, AT, t0 # len number of uncopied bytes
bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
* dst += (fault addr - src) to put dst at first byte to clear
*/
ADD dst, t0 # compute start address in a1
SUB dst, src
/*
* Clear len bytes starting at dst. Can't call __bzero because it
* might modify len. An inefficient loop for these rare times...
*/
.set reorder /* DADDI_WAR */
SUB src, len, 1
beqz len, .Ldone\@
.set noreorder
1: sb zero, 0(dst)
ADD dst, dst, 1
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
bnez src, 1b
SUB src, src, 1
#else
.set push
.set noat
li v1, 1
bnez src, 1b
SUB src, src, v1
.set pop
#endif
jr ra jr ra
nop nop
#define SEXC(n) \ #define SEXC(n) \
.set reorder; /* DADDI_WAR */ \ .set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u\@: \ .Ls_exc_p ## n ## u\@: \
...@@ -672,15 +642,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ ...@@ -672,15 +642,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
move a2, zero move a2, zero
END(__rmemcpy) END(__rmemcpy)
/*
* t6 is used as a flag to note inatomic mode.
*/
LEAF(__copy_user_inatomic)
EXPORT_SYMBOL(__copy_user_inatomic)
b __copy_user_common
li t6, 1
END(__copy_user_inatomic)
/* /*
* A combined memcpy/__copy_user * A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of * __copy_user sets len to 0 for success; else to an upper bound of
...@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy) ...@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
.L__memcpy: .L__memcpy:
FEXPORT(__copy_user) FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user) EXPORT_SYMBOL(__copy_user)
li t6, 0 /* not inatomic */
__copy_user_common:
/* Legacy Mode, user <-> user */ /* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
...@@ -708,20 +667,12 @@ __copy_user_common: ...@@ -708,20 +667,12 @@ __copy_user_common:
* space * space
*/ */
LEAF(__copy_user_inatomic_eva)
EXPORT_SYMBOL(__copy_user_inatomic_eva)
b __copy_from_user_common
li t6, 1
END(__copy_user_inatomic_eva)
/* /*
* __copy_from_user (EVA) * __copy_from_user (EVA)
*/ */
LEAF(__copy_from_user_eva) LEAF(__copy_from_user_eva)
EXPORT_SYMBOL(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva)
li t6, 0 /* not inatomic */
__copy_from_user_common:
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva) END(__copy_from_user_eva)
......
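The dedicated __copy_user_inatomic / __copy_user_inatomic_eva entry points (and their t6/t7 mode flags) disappear together with the in-asm zeroing: once tail-clearing is gone nothing distinguishes the inatomic path any more, and the generic header expresses __copy_from_user_inatomic() directly on top of the raw primitive, roughly:

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* no might_fault() and no zero-padding: callers run with pagefaults
	 * disabled and must cope with a short copy themselves */
	return raw_copy_from_user(to, from, n);
}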
...@@ -18,7 +18,7 @@ struct stackframe { ...@@ -18,7 +18,7 @@ struct stackframe {
static inline int get_mem(unsigned long addr, unsigned long *result) static inline int get_mem(unsigned long addr, unsigned long *result)
{ {
unsigned long *address = (unsigned long *) addr; unsigned long *address = (unsigned long *) addr;
if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long))) if (!access_ok(VERIFY_READ, address, sizeof(unsigned long)))
return -1; return -1;
if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
return -3; return -3;
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
generic-y += barrier.h generic-y += barrier.h
generic-y += clkdev.h generic-y += clkdev.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h generic-y += irq_work.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
......
...@@ -14,13 +14,8 @@ ...@@ -14,13 +14,8 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/thread_info.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/errno.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
...@@ -71,26 +66,7 @@ static inline int ___range_ok(unsigned long addr, unsigned int size) ...@@ -71,26 +66,7 @@ static inline int ___range_ok(unsigned long addr, unsigned int size)
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0) #define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
/* #include <asm/extable.h>
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise. */
extern int fixup_exception(struct pt_regs *regs);
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
...@@ -299,170 +275,19 @@ do { \ ...@@ -299,170 +275,19 @@ do { \
} \ } \
} while (0) } while (0)
#define __copy_user_zeroing(to, from, size) \ static inline unsigned long
do { \ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
if (size) { \
void *__to = to; \
const void *__from = from; \
int w; \
asm volatile( \
"0: movbu (%0),%3;\n" \
"1: movbu %3,(%1);\n" \
" inc %0;\n" \
" inc %1;\n" \
" add -1,%2;\n" \
" bne 0b;\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3:\n" \
" mov %2,%0\n" \
" clr %3\n" \
"4: movbu %3,(%1);\n" \
" inc %1;\n" \
" add -1,%2;\n" \
" bne 4b;\n" \
" mov %0,%2\n" \
" jmp 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,3b\n" \
" .long 1b,3b\n" \
" .previous\n" \
: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
: "0"(__from), "1"(__to), "2"(size) \
: "cc", "memory"); \
} \
} while (0)
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
unsigned long n)
{
__copy_user_zeroing(to, from, n);
return n;
}
static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
unsigned long n)
{ {
__copy_user(to, from, n); __copy_user(to, from, n);
return n; return n;
} }
static inline unsigned long
#if 0 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
asm volatile( \
" mov %0,a0;\n" \
"0: movbu (%1),d3;\n" \
"1: movbu d3,(%2);\n" \
" add -1,a0;\n" \
" bne 0b;\n" \
"2:;" \
".section .fixup,\"ax\"\n" \
"3: jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,3b\n" \
" .long 1b,3b\n" \
".previous" \
: \
: "d"(size), "d"(to), "d"(from) \
: "d3", "a0"); \
} while (0)
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size) \
do { \
asm volatile( \
" mov %0,a0;\n" \
"0: movbu (%1),d3;\n" \
"1: movbu d3,(%2);\n" \
" add -1,a0;\n" \
" bne 0b;\n" \
"2:;" \
".section .fixup,\"ax\"\n" \
"3: jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .balign 4\n" \
" .long 0b,3b\n" \
" .long 1b,3b\n" \
".previous" \
: \
: "d"(size), "d"(to), "d"(from) \
: "d3", "a0"); \
} while (0)
static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
__constant_copy_user(to, from, n);
return n;
}
static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__constant_copy_user_zeroing(to, from, n);
return n;
}
static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
unsigned long n)
{ {
__constant_copy_user(to, from, n); __copy_user(to, from, n);
return n;
}
static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
unsigned long n)
{
__constant_copy_user_zeroing(to, from, n);
return n; return n;
} }
#endif
extern unsigned long __generic_copy_to_user(void __user *, const void *,
unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
unsigned long);
#define __copy_to_user_inatomic(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user(to, from, n) \
({ \
might_fault(); \
__copy_to_user_inatomic((to), (from), (n)); \
})
#define __copy_from_user(to, from, n) \
({ \
might_fault(); \
__copy_from_user_inatomic((to), (from), (n)); \
})
#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
extern long strncpy_from_user(char *dst, const char __user *src, long count); extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count); extern long __strncpy_from_user(char *dst, const char __user *src, long count);
......
...@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user); ...@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__generic_copy_from_user);
EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(strnlen_user); EXPORT_SYMBOL(strnlen_user);
extern u64 __ashrdi3(u64, unsigned); extern u64 __ashrdi3(u64, unsigned);
......
...@@ -11,24 +11,6 @@ ...@@ -11,24 +11,6 @@
*/ */
#include <linux/uaccess.h> #include <linux/uaccess.h>
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
__copy_user(to, from, n);
return n;
}
unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to, from, n);
else
memset(to, 0, n);
return n;
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
......
...@@ -13,6 +13,7 @@ generic-y += dma.h ...@@ -13,6 +13,7 @@ generic-y += dma.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += ftrace.h generic-y += ftrace.h
......
...@@ -13,33 +13,11 @@ ...@@ -13,33 +13,11 @@
#ifndef _ASM_NIOS2_UACCESS_H #ifndef _ASM_NIOS2_UACCESS_H
#define _ASM_NIOS2_UACCESS_H #define _ASM_NIOS2_UACCESS_H
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/page.h> #include <asm/page.h>
#define VERIFY_READ 0 #include <asm/extable.h>
#define VERIFY_WRITE 1
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn;
unsigned long fixup;
};
extern int fixup_exception(struct pt_regs *regs);
/* /*
* Segment stuff * Segment stuff
...@@ -95,36 +73,17 @@ static inline unsigned long __must_check clear_user(void __user *to, ...@@ -95,36 +73,17 @@ static inline unsigned long __must_check clear_user(void __user *to,
return __clear_user(to, n); return __clear_user(to, n);
} }
extern long __copy_from_user(void *to, const void __user *from, extern unsigned long
unsigned long n); raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern long __copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
static inline long copy_from_user(void *to, const void __user *from, #define INLINE_COPY_FROM_USER
unsigned long n) #define INLINE_COPY_TO_USER
{
unsigned long res = n;
if (access_ok(VERIFY_READ, from, n))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline long copy_to_user(void __user *to, const void *from,
unsigned long n)
{
if (!access_ok(VERIFY_WRITE, to, n))
return n;
return __copy_to_user(to, from, n);
}
extern long strncpy_from_user(char *__to, const char __user *__from, extern long strncpy_from_user(char *__to, const char __user *__from,
long __len); long __len);
extern long strnlen_user(const char __user *s, long n); extern long strnlen_user(const char __user *s, long n);
#define __copy_from_user_inatomic __copy_from_user
#define __copy_to_user_inatomic __copy_to_user
/* Optimized macros */ /* Optimized macros */
#define __get_user_asm(val, insn, addr, err) \ #define __get_user_asm(val, insn, addr, err) \
{ \ { \
......
...@@ -10,9 +10,9 @@ ...@@ -10,9 +10,9 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
asm(".global __copy_from_user\n" asm(".global raw_copy_from_user\n"
" .type __copy_from_user, @function\n" " .type raw_copy_from_user, @function\n"
"__copy_from_user:\n" "raw_copy_from_user:\n"
" movi r2,7\n" " movi r2,7\n"
" mov r3,r4\n" " mov r3,r4\n"
" bge r2,r6,1f\n" " bge r2,r6,1f\n"
...@@ -65,12 +65,12 @@ asm(".global __copy_from_user\n" ...@@ -65,12 +65,12 @@ asm(".global __copy_from_user\n"
".word 7b,13b\n" ".word 7b,13b\n"
".previous\n" ".previous\n"
); );
EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(raw_copy_from_user);
asm( asm(
" .global __copy_to_user\n" " .global raw_copy_to_user\n"
" .type __copy_to_user, @function\n" " .type raw_copy_to_user, @function\n"
"__copy_to_user:\n" "raw_copy_to_user:\n"
" movi r2,7\n" " movi r2,7\n"
" mov r3,r4\n" " mov r3,r4\n"
" bge r2,r6,1f\n" " bge r2,r6,1f\n"
...@@ -127,7 +127,7 @@ asm( ...@@ -127,7 +127,7 @@ asm(
".word 11b,13b\n" ".word 11b,13b\n"
".word 12b,13b\n" ".word 12b,13b\n"
".previous\n"); ".previous\n");
EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(raw_copy_to_user);
long strncpy_from_user(char *__to, const char __user *__from, long __len) long strncpy_from_user(char *__to, const char __user *__from, long __len)
{ {
......
...@@ -16,6 +16,7 @@ generic-y += dma.h ...@@ -16,6 +16,7 @@ generic-y += dma.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += ftrace.h generic-y += ftrace.h
......
...@@ -22,14 +22,10 @@ ...@@ -22,14 +22,10 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h> #include <linux/prefetch.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
...@@ -65,23 +61,6 @@ ...@@ -65,23 +61,6 @@
#define access_ok(type, addr, size) \ #define access_ok(type, addr, size) \
__range_ok((unsigned long)addr, (unsigned long)size) __range_ok((unsigned long)addr, (unsigned long)size)
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type. * use the right size if we just have the right pointer type.
...@@ -257,34 +236,18 @@ do { \ ...@@ -257,34 +236,18 @@ do { \
extern unsigned long __must_check extern unsigned long __must_check
__copy_tofrom_user(void *to, const void *from, unsigned long size); __copy_tofrom_user(void *to, const void *from, unsigned long size);
#define __copy_from_user(to, from, size) \
__copy_tofrom_user(to, from, size)
#define __copy_to_user(to, from, size) \
__copy_tofrom_user(to, from, size)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{ {
unsigned long res = n; return __copy_tofrom_user(to, (__force const void *)from, size);
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_tofrom_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
} }
static inline unsigned long static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long n) raw_copy_to_user(void *to, const void __user *from, unsigned long size)
{ {
if (likely(access_ok(VERIFY_WRITE, to, n))) return __copy_tofrom_user((__force void *)to, from, size);
n = __copy_tofrom_user(to, from, n);
return n;
} }
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
extern unsigned long __clear_user(void *addr, unsigned long size); extern unsigned long __clear_user(void *addr, unsigned long size);
...@@ -297,7 +260,7 @@ clear_user(void *addr, unsigned long size) ...@@ -297,7 +260,7 @@ clear_user(void *addr, unsigned long size)
} }
#define user_addr_max() \ #define user_addr_max() \
(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) (uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dest, const char __user *src, long count); extern long strncpy_from_user(char *dest, const char __user *src, long count);
......
...@@ -26,7 +26,6 @@ config PARISC ...@@ -26,7 +26,6 @@ config PARISC
select SYSCTL_ARCH_UNALIGN_ALLOW select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC select HAVE_MOD_ARCH_SPECIFIC
select HAVE_ARCH_HARDENED_USERCOPY
select VIRT_TO_BUS select VIRT_TO_BUS
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS select CLONE_BACKWARDS
......
...@@ -109,7 +109,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -109,7 +109,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble... * our gateway page, and causes no end of trouble...
*/ */
if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) if (uaccess_kernel() && !uaddr)
return -EFAULT; return -EFAULT;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
......
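The futex hunk above is one of several places in this section where an open-coded segment_eq(get_fs(), KERNEL_DS) test becomes uaccess_kernel(). The helper is no more than a common spelling of that test, roughly:

#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

so behaviour is unchanged; the point is that the check now has one name the generic code can use as well.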
...@@ -6,15 +6,10 @@ ...@@ -6,15 +6,10 @@
*/ */
#include <asm/page.h> #include <asm/page.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h> #include <asm-generic/uaccess-unaligned.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/thread_info.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define KERNEL_DS ((mm_segment_t){0}) #define KERNEL_DS ((mm_segment_t){0})
#define USER_DS ((mm_segment_t){1}) #define USER_DS ((mm_segment_t){1})
...@@ -216,9 +211,6 @@ struct exception_data { ...@@ -216,9 +211,6 @@ struct exception_data {
* Complex access routines -- external declarations * Complex access routines -- external declarations
*/ */
extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long); extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long); extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long); extern long lstrnlen_user(const char __user *, long);
...@@ -232,59 +224,14 @@ extern long lstrnlen_user(const char __user *, long); ...@@ -232,59 +224,14 @@ extern long lstrnlen_user(const char __user *, long);
#define clear_user lclear_user #define clear_user lclear_user
#define __clear_user lclear_user #define __clear_user lclear_user
unsigned long __must_check __copy_to_user(void __user *dst, const void *src, unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
unsigned long len); unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src, unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
unsigned long len); unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
unsigned long len); unsigned long len);
#define __copy_in_user copy_in_user #define INLINE_COPY_TO_USER
#define __copy_to_user_inatomic __copy_to_user #define INLINE_COPY_FROM_USER
#define __copy_from_user_inatomic __copy_from_user
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
unsigned long ret = n;
if (likely(sz < 0 || sz >= n)) {
check_object_size(to, n, false);
ret = __copy_from_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
if (unlikely(ret))
memset(to + (n - ret), 0, ret);
return ret;
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
if (likely(sz < 0 || sz >= n)) {
check_object_size(from, n, true);
n = __copy_to_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
struct pt_regs; struct pt_regs;
int fixup_exception(struct pt_regs *regs); int fixup_exception(struct pt_regs *regs);
......
...@@ -29,32 +29,32 @@ ...@@ -29,32 +29,32 @@
DECLARE_PER_CPU(struct exception_data, exception_data); DECLARE_PER_CPU(struct exception_data, exception_data);
#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) #define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
#define get_kernel_space() (0) #define get_kernel_space() (0)
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
extern unsigned long pa_memcpy(void *dst, const void *src, extern unsigned long pa_memcpy(void *dst, const void *src,
unsigned long len); unsigned long len);
unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long raw_copy_to_user(void __user *dst, const void *src,
unsigned long len) unsigned long len)
{ {
mtsp(get_kernel_space(), 1); mtsp(get_kernel_space(), 1);
mtsp(get_user_space(), 2); mtsp(get_user_space(), 2);
return pa_memcpy((void __force *)dst, src, len); return pa_memcpy((void __force *)dst, src, len);
} }
EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(raw_copy_to_user);
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long raw_copy_from_user(void *dst, const void __user *src,
unsigned long len) unsigned long len)
{ {
mtsp(get_user_space(), 1); mtsp(get_user_space(), 1);
mtsp(get_kernel_space(), 2); mtsp(get_kernel_space(), 2);
return pa_memcpy(dst, (void __force *)src, len); return pa_memcpy(dst, (void __force *)src, len);
} }
EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(raw_copy_from_user);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len) unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
{ {
mtsp(get_user_space(), 1); mtsp(get_user_space(), 1);
mtsp(get_user_space(), 2); mtsp(get_user_space(), 2);
...@@ -70,7 +70,7 @@ void * memcpy(void * dst,const void *src, size_t count) ...@@ -70,7 +70,7 @@ void * memcpy(void * dst,const void *src, size_t count)
return dst; return dst;
} }
EXPORT_SYMBOL(copy_in_user); EXPORT_SYMBOL(raw_copy_in_user);
EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memcpy);
long probe_kernel_read(void *dst, const void *src, size_t size) long probe_kernel_read(void *dst, const void *src, size_t size)
......
...@@ -117,7 +117,6 @@ config PPC ...@@ -117,7 +117,6 @@ config PPC
select GENERIC_STRNLEN_USER select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL_OLD select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
......
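HAVE_ARCH_HARDENED_USERCOPY disappears from the parisc and powerpc Kconfig entries above (and from s390 and sparc further down) because the hardened-usercopy hook no longer lives in per-arch copy routines; with raw_copy_* in place it can sit once in the generic inline helpers. A sketch of where the check ends up (simplified; the real helper carries __must_check and a couple more instrumentation hooks):

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	check_object_size(to, n, false);	/* CONFIG_HARDENED_USERCOPY hook */
	return raw_copy_from_user(to, from, n);
}

That is why the feature can be treated as available everywhere instead of being selected arch by arch.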
#ifndef _ARCH_POWERPC_EXTABLE_H
#define _ARCH_POWERPC_EXTABLE_H
/*
* The exception table consists of pairs of relative addresses: the first is
* the address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out what
* to do.
*
* All the routines below use bits of fixup code that are out of line with the
* main instruction path. This means when everything is well, we don't even
* have to jump over them. Further, they do not intrude on our cache or tlb
* entries.
*/
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
int insn;
int fixup;
};
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
#endif
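The new powerpc extable.h also keeps the table in self-relative 32-bit offsets (ARCH_HAS_RELATIVE_EXTABLE), the same layout s390 uses below: each field stores the target minus the field's own address, which halves the table on 64-bit and keeps it valid wherever the kernel is loaded. Recovering the addresses is symmetric with extable_fixup() above; a sketch, where extable_insn() and try_fixup() are illustrative names rather than anything this patch adds:

static inline unsigned long
extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;	/* offset back to an address */
}

static bool try_fixup(struct pt_regs *regs, const struct exception_table_entry *e)
{
	if (extable_insn(e) != regs->nip)	/* nip: powerpc program counter */
		return false;
	regs->nip = extable_fixup(e);		/* resume at the fixup stub */
	return true;
}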
#ifndef _ARCH_POWERPC_UACCESS_H #ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H #define _ARCH_POWERPC_UACCESS_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h> #include <asm/asm-compat.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
...@@ -63,31 +56,6 @@ ...@@ -63,31 +56,6 @@
(__chk_user_ptr(addr), \ (__chk_user_ptr(addr), \
__access_ok((__force unsigned long)(addr), (size), get_fs())) __access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
* The exception table consists of pairs of relative addresses: the first is
* the address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out what
* to do.
*
* All the routines below use bits of fixup code that are out of line with the
* main instruction path. This means when everything is well, we don't even
* have to jump over them. Further, they do not intrude on our cache or tlb
* entries.
*/
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
int insn;
int fixup;
};
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type. * use the right size if we just have the right pointer type.
...@@ -301,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to, ...@@ -301,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to,
#ifndef __powerpc64__ #ifndef __powerpc64__
static inline unsigned long copy_from_user(void *to, #define INLINE_COPY_FROM_USER
const void __user *from, unsigned long n) #define INLINE_COPY_TO_USER
{
if (likely(access_ok(VERIFY_READ, from, n))) {
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
memset(to, 0, n);
return n;
}
static inline unsigned long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n)) {
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
}
return n;
}
#else /* __powerpc64__ */ #else /* __powerpc64__ */
#define __copy_in_user(to, from, size) \ static inline unsigned long
__copy_tofrom_user((to), (from), (size)) raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
extern unsigned long copy_from_user(void *to, const void __user *from, return __copy_tofrom_user(to, from, n);
unsigned long n); }
extern unsigned long copy_to_user(void __user *to, const void *from,
unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
unsigned long n);
#endif /* __powerpc64__ */ #endif /* __powerpc64__ */
static inline unsigned long __copy_from_user_inatomic(void *to, static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n) const void __user *from, unsigned long n)
{ {
if (__builtin_constant_p(n) && (n <= 8)) { if (__builtin_constant_p(n) && (n <= 8)) {
...@@ -360,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, ...@@ -360,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
return 0; return 0;
} }
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n); return __copy_tofrom_user((__force void __user *)to, from, n);
} }
static inline unsigned long __copy_to_user_inatomic(void __user *to, static inline unsigned long raw_copy_to_user(void __user *to,
const void *from, unsigned long n) const void *from, unsigned long n)
{ {
if (__builtin_constant_p(n) && (n <= 8)) { if (__builtin_constant_p(n) && (n <= 8)) {
...@@ -389,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, ...@@ -389,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
return 0; return 0;
} }
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force const void __user *)from, n); return __copy_tofrom_user(to, (__force const void __user *)from, n);
} }
static inline unsigned long __copy_from_user(void *to,
const void __user *from, unsigned long size)
{
might_fault();
return __copy_from_user_inatomic(to, from, size);
}
static inline unsigned long __copy_to_user(void __user *to,
const void *from, unsigned long size)
{
might_fault();
return __copy_to_user_inatomic(to, from, size);
}
extern unsigned long __clear_user(void __user *addr, unsigned long size); extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size) static inline unsigned long clear_user(void __user *addr, unsigned long size)
...@@ -422,7 +349,4 @@ extern long strncpy_from_user(char *dst, const char __user *src, long count); ...@@ -422,7 +349,4 @@ extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str); extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n); extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ARCH_POWERPC_UACCESS_H */ #endif /* _ARCH_POWERPC_UACCESS_H */
...@@ -14,7 +14,7 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \ ...@@ -14,7 +14,7 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \
obj-$(CONFIG_PPC32) += div64.o copy_32.o obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \ obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \ copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
memcpy_64.o memcmp_64.o memcpy_64.o memcmp_64.o
......
...@@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user) ...@@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user)
bdnz 130b bdnz 130b
/* then clear out the destination: r3 bytes starting at 4(r6) */ /* then clear out the destination: r3 bytes starting at 4(r6) */
132: mfctr r3 132: mfctr r3
srwi. r0,r3,2
li r9,0
mtctr r0
beq 113f
112: stwu r9,4(r6)
bdnz 112b
113: andi. r0,r3,3
mtctr r0
beq 120f
114: stb r9,4(r6)
addi r6,r6,1
bdnz 114b
120: blr 120: blr
EX_TABLE(30b,108b) EX_TABLE(30b,108b)
...@@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user) ...@@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user)
EX_TABLE(41b,111b) EX_TABLE(41b,111b)
EX_TABLE(130b,132b) EX_TABLE(130b,132b)
EX_TABLE(131b,120b) EX_TABLE(131b,120b)
EX_TABLE(112b,120b)
EX_TABLE(114b,120b)
EXPORT_SYMBOL(__copy_tofrom_user) EXPORT_SYMBOL(__copy_tofrom_user)
...@@ -319,32 +319,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ...@@ -319,32 +319,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr blr
/* /*
* here we have trapped again, need to clear ctr bytes starting at r3 * here we have trapped again, amount remaining is in ctr.
*/ */
143: mfctr r5 143: mfctr r3
li r0,0
mr r4,r3
mr r3,r5 /* return the number of bytes not copied */
1: andi. r9,r4,7
beq 3f
90: stb r0,0(r4)
addic. r5,r5,-1
addi r4,r4,1
bne 1b
blr
3: cmpldi cr1,r5,8
srdi r9,r5,3
andi. r5,r5,7
blt cr1,93f
mtctr r9
91: std r0,0(r4)
addi r4,r4,8
bdnz 91b
93: beqlr
mtctr r5
92: stb r0,0(r4)
addi r4,r4,1
bdnz 92b
blr blr
/* /*
...@@ -389,10 +366,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ...@@ -389,10 +366,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
ld r5,-8(r1) ld r5,-8(r1)
add r6,r6,r5 add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */ subf r3,r3,r6 /* #bytes not copied */
190: blr
191:
192:
blr /* #bytes not copied in r3 */
EX_TABLE(20b,120b) EX_TABLE(20b,120b)
EX_TABLE(220b,320b) EX_TABLE(220b,320b)
...@@ -451,9 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ...@@ -451,9 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
EX_TABLE(88b,188b) EX_TABLE(88b,188b)
EX_TABLE(43b,143b) EX_TABLE(43b,143b)
EX_TABLE(89b,189b) EX_TABLE(89b,189b)
EX_TABLE(90b,190b)
EX_TABLE(91b,191b)
EX_TABLE(92b,192b)
/* /*
* Routine to copy a whole page of data, optimized for POWER4. * Routine to copy a whole page of data, optimized for POWER4.
......
/*
* Functions which are too large to be inlined.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/uaccess.h>
unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (likely(access_ok(VERIFY_READ, from, n)))
n = __copy_from_user(to, from, n);
else
memset(to, 0, n);
return n;
}
unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (likely(access_ok(VERIFY_WRITE, to, n)))
n = __copy_to_user(to, from, n);
return n;
}
unsigned long copy_in_user(void __user *to, const void __user *from,
unsigned long n)
{
might_sleep();
if (likely(access_ok(VERIFY_READ, from, n) &&
access_ok(VERIFY_WRITE, to, n)))
n =__copy_tofrom_user(to, from, n);
return n;
}
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_in_user);
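All of usercopy_64.c can go (and the Makefile hunk above stops building it) because these out-of-line wrappers are exactly what the generic code now provides on top of raw_copy_{from,to,in}_user. copy_in_user(), for instance, becomes a common definition along these lines (a sketch of the shared wrapper, not powerpc-specific code):

static __always_inline unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}

and the exported out-of-line _copy_{from,to}_user come from lib/usercopy.c for any architecture that does not ask for the inline variants.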
...@@ -124,7 +124,6 @@ config S390 ...@@ -124,7 +124,6 @@ config S390
select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_EARLY_PFN_TO_NID select HAVE_ARCH_EARLY_PFN_TO_NID
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
......
#ifndef __S390_EXTABLE_H
#define __S390_EXTABLE_H
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
int insn, fixup;
};
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
#define ARCH_HAS_RELATIVE_EXTABLE
#endif
...@@ -12,13 +12,9 @@ ...@@ -12,13 +12,9 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/ctl_reg.h> #include <asm/ctl_reg.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
...@@ -42,7 +38,7 @@ ...@@ -42,7 +38,7 @@
static inline void set_fs(mm_segment_t fs) static inline void set_fs(mm_segment_t fs)
{ {
current->thread.mm_segment = fs; current->thread.mm_segment = fs;
if (segment_eq(fs, KERNEL_DS)) { if (uaccess_kernel()) {
set_cpu_flag(CIF_ASCE_SECONDARY); set_cpu_flag(CIF_ASCE_SECONDARY);
__ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 7, 7);
} else { } else {
...@@ -64,72 +60,14 @@ static inline int __range_ok(unsigned long addr, unsigned long size) ...@@ -64,72 +60,14 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
#define access_ok(type, addr, size) __access_ok(addr, size) #define access_ok(type, addr, size) __access_ok(addr, size)
/* unsigned long __must_check
* The exception table consists of pairs of addresses: the first is the raw_copy_from_user(void *to, const void __user *from, unsigned long n);
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
int insn, fixup;
};
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
#define ARCH_HAS_RELATIVE_EXTABLE
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
unsigned long n);
/** unsigned long __must_check
* __copy_to_user: - Copy a block of data into user space, with less checking. raw_copy_to_user(void __user *to, const void *from, unsigned long n);
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
unsigned long n);
#define __copy_to_user_inatomic __copy_to_user #define INLINE_COPY_FROM_USER
#define __copy_from_user_inatomic __copy_from_user #define INLINE_COPY_TO_USER
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
...@@ -218,13 +156,13 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s ...@@ -218,13 +156,13 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{ {
size = __copy_to_user(ptr, x, size); size = raw_copy_to_user(ptr, x, size);
return size ? -EFAULT : 0; return size ? -EFAULT : 0;
} }
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{ {
size = __copy_from_user(x, ptr, size); size = raw_copy_from_user(x, ptr, size);
return size ? -EFAULT : 0; return size ? -EFAULT : 0;
} }
...@@ -314,77 +252,8 @@ int __get_user_bad(void) __attribute__((noreturn)); ...@@ -314,77 +252,8 @@ int __get_user_bad(void) __attribute__((noreturn));
#define __put_user_unaligned __put_user #define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user #define __get_user_unaligned __get_user
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return __copy_to_user(to, from, n);
}
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned int sz = __compiletime_object_size(to);
might_fault();
if (unlikely(sz != -1 && sz < n)) {
if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
return __copy_from_user(to, from, n);
}
unsigned long __must_check unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n); raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
return __copy_in_user(to, from, n);
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
......
...@@ -26,7 +26,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr ...@@ -26,7 +26,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
tmp1 = -4096UL; tmp1 = -4096UL;
asm volatile( asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"9: jz 7f\n" "6: jz 4f\n"
"1: algr %0,%3\n" "1: algr %0,%3\n"
" slgr %1,%3\n" " slgr %1,%3\n"
" slgr %2,%3\n" " slgr %2,%3\n"
...@@ -35,23 +35,13 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr ...@@ -35,23 +35,13 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
" slgr %4,%1\n" " slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 4f\n" " jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
"10:slgr %0,%4\n" "7: slgr %0,%4\n"
" algr %2,%4\n" " j 5f\n"
"4: lghi %4,-1\n" "4: slgr %0,%0\n"
" algr %4,%0\n" /* copy remaining size, subtract 1 */ "5:\n"
" bras %3,6f\n" /* memset loop */ EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
" xc 0(1,%2),0(%2)\n"
"5: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
"6: aghi %4,-256\n"
" jnm 5b\n"
" ex %4,0(%3)\n"
" j 8f\n"
"7: slgr %0,%0\n"
"8:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory"); : "d" (reg0) : "cc", "memory");
return size; return size;
...@@ -67,49 +57,38 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, ...@@ -67,49 +57,38 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
asm volatile( asm volatile(
" sacf 0\n" " sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%3\n" "0: mvcp 0(%0,%2),0(%1),%3\n"
"10:jz 8f\n" "7: jz 5f\n"
"1: algr %0,%3\n" "1: algr %0,%3\n"
" la %1,256(%1)\n" " la %1,256(%1)\n"
" la %2,256(%2)\n" " la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n" "2: mvcp 0(%0,%2),0(%1),%3\n"
"11:jnz 1b\n" "8: jnz 1b\n"
" j 8f\n" " j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" lghi %3,-4096\n" " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" slgr %4,%1\n" " slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n" " jnh 6f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n" "4: mvcp 0(%4,%2),0(%1),%3\n"
"12:slgr %0,%4\n" "9: slgr %0,%4\n"
" algr %2,%4\n" " j 6f\n"
"5: lghi %4,-1\n" "5: slgr %0,%0\n"
" algr %4,%0\n" /* copy remaining size, subtract 1 */ "6: sacf 768\n"
" bras %3,7f\n" /* memset loop */ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
" xc 0(1,%2),0(%2)\n" EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
"6: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
"7: aghi %4,-256\n"
" jnm 6b\n"
" ex %4,0(%3)\n"
" j 9f\n"
"8: slgr %0,%0\n"
"9: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory"); : : "cc", "memory");
return size; return size;
} }
unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
check_object_size(to, n, false);
if (static_branch_likely(&have_mvcos)) if (static_branch_likely(&have_mvcos))
return copy_from_user_mvcos(to, from, n); return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n); return copy_from_user_mvcp(to, from, n);
} }
EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size) unsigned long size)
...@@ -176,14 +155,13 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, ...@@ -176,14 +155,13 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
return size; return size;
} }
unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
check_object_size(from, n, true);
if (static_branch_likely(&have_mvcos)) if (static_branch_likely(&have_mvcos))
return copy_to_user_mvcos(to, from, n); return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n); return copy_to_user_mvcs(to, from, n);
} }
EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size) unsigned long size)
...@@ -240,13 +218,13 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user ...@@ -240,13 +218,13 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
return size; return size;
} }
unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{ {
if (static_branch_likely(&have_mvcos)) if (static_branch_likely(&have_mvcos))
return copy_in_user_mvcos(to, from, n); return copy_in_user_mvcos(to, from, n);
return copy_in_user_mvc(to, from, n); return copy_in_user_mvc(to, from, n);
} }
EXPORT_SYMBOL(__copy_in_user); EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{ {
......
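The long memset loops deleted from the mvcos and mvcp fixup paths are the s390 half of the same cleanup as the powerpc assembly above: on a fault the primitive now only reports how many bytes were left, and zeroing the destination tail happens once in the generic copy_from_user(). Callers keep the usual idiom and keep the no-stale-data guarantee; for example (function and variable names below are made up for illustration):

static int read_request(const char __user *ubuf)
{
	char buf[64];

	if (copy_from_user(buf, ubuf, sizeof(buf)))
		return -EFAULT;	/* any uncopied tail of buf was zeroed, not left stale */
	/* ... parse buf ... */
	return 0;
}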
...@@ -4,6 +4,7 @@ header-y += ...@@ -4,6 +4,7 @@ header-y +=
generic-y += barrier.h generic-y += barrier.h
generic-y += clkdev.h generic-y += clkdev.h
generic-y += current.h generic-y += current.h
generic-y += extable.h
generic-y += irq_work.h generic-y += irq_work.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
......
#ifndef _ASM_SCORE_EXTABLE_H
#define _ASM_SCORE_EXTABLE_H
struct exception_table_entry {
unsigned long insn;
unsigned long fixup;
};
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);
#endif
...@@ -2,13 +2,8 @@ ...@@ -2,13 +2,8 @@
#define __SCORE_UACCESS_H #define __SCORE_UACCESS_H
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/extable.h> #include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit) #define get_fs() (current_thread_info()->addr_limit)
#define segment_eq(a, b) ((a).seg == (b).seg) #define segment_eq(a, b) ((a).seg == (b).seg)
...@@ -300,61 +295,19 @@ extern void __put_user_unknown(void); ...@@ -300,61 +295,19 @@ extern void __put_user_unknown(void);
extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long len) raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
unsigned long res = len;
if (likely(access_ok(VERIFY_READ, from, len)))
res = __copy_tofrom_user(to, from, len);
if (unlikely(res))
memset(to + (len - res), 0, res);
return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long len)
{
if (likely(access_ok(VERIFY_WRITE, to, len)))
len = __copy_tofrom_user(to, from, len);
return len;
}
static inline unsigned long
__copy_from_user(void *to, const void *from, unsigned long len)
{ {
unsigned long left = __copy_tofrom_user(to, from, len); return __copy_tofrom_user(to, (__force const void *)from, len);
if (unlikely(left))
memset(to + (len - left), 0, left);
return left;
} }
#define __copy_to_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
static inline unsigned long static inline unsigned long
__copy_to_user_inatomic(void *to, const void *from, unsigned long len) raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{ {
return __copy_to_user(to, from, len); return __copy_tofrom_user((__force void *)to, from, len);
} }
static inline unsigned long #define INLINE_COPY_FROM_USER
__copy_from_user_inatomic(void *to, const void *from, unsigned long len) #define INLINE_COPY_TO_USER
{
return __copy_tofrom_user(to, from, len);
}
#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long len)
{
if (access_ok(VERIFY_READ, from, len) &&
access_ok(VERFITY_WRITE, to, len))
return __copy_tofrom_user(to, from, len);
}
/* /*
* __clear_user: - Zero a block of memory in user space, with less checking. * __clear_user: - Zero a block of memory in user space, with less checking.
......
#ifndef __ASM_SH_EXTABLE_H
#define __ASM_SH_EXTABLE_H
#include <asm-generic/extable.h>
#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif
#endif
#ifndef __ASM_SH_UACCESS_H #ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H #define __ASM_SH_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define __addr_ok(addr) \ #define __addr_ok(addr) \
((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
...@@ -112,19 +108,18 @@ extern __must_check long strnlen_user(const char __user *str, long n); ...@@ -112,19 +108,18 @@ extern __must_check long strnlen_user(const char __user *str, long n);
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
static __always_inline unsigned long static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
return __copy_user(to, (__force void *)from, n); return __copy_user(to, (__force void *)from, n);
} }
static __always_inline unsigned long __must_check static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
return __copy_user((__force void *)to, from, n); return __copy_user((__force void *)to, from, n);
} }
#define INLINE_COPY_FROM_USER
#define __copy_to_user_inatomic __copy_to_user #define INLINE_COPY_TO_USER
#define __copy_from_user_inatomic __copy_from_user
/* /*
* Clear the area and return remaining number of bytes * Clear the area and return remaining number of bytes
...@@ -144,55 +139,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); ...@@ -144,55 +139,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
__cl_size; \ __cl_size; \
}) })
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long __copy_from = (unsigned long) from;
__kernel_size_t __copy_size = (__kernel_size_t) n;
if (__copy_size && __access_ok(__copy_from, __copy_size))
__copy_size = __copy_user(to, from, __copy_size);
if (unlikely(__copy_size))
memset(to + (n - __copy_size), 0, __copy_size);
return __copy_size;
}
static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned long __copy_to = (unsigned long) to;
__kernel_size_t __copy_size = (__kernel_size_t) n;
if (__copy_size && __access_ok(__copy_to, __copy_size))
return __copy_user(to, from, __copy_size);
return __copy_size;
}
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif
int fixup_exception(struct pt_regs *regs);
extern void *set_exception_table_vec(unsigned int vec, void *handler); extern void *set_exception_table_vec(unsigned int vec, void *handler);
static inline void *set_exception_table_evt(unsigned int evt, void *handler) static inline void *set_exception_table_evt(unsigned int evt, void *handler)
......
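The sh conversion above shows how small the per-arch side has become: one __copy_user() primitive, two trivial raw_copy_* wrappers, and the INLINE_COPY_* opt-in. Everything else is derived by common code, so the whole contract an architecture has to meet is roughly the three declarations below (raw_copy_in_user() only matters where 64-bit/compat needs it):

unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);

with the single rule that each returns the number of bytes not copied and does no checking or zeroing of its own.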
...@@ -42,7 +42,6 @@ config SPARC ...@@ -42,7 +42,6 @@ config SPARC
select OLD_SIGSUSPEND select OLD_SIGSUSPEND
select ARCH_HAS_SG_CHAIN select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
select LOCKDEP_SMALL if LOCKDEP select LOCKDEP_SMALL if LOCKDEP
select ARCH_WANT_RELAX_ORDER select ARCH_WANT_RELAX_ORDER
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#endif #endif
#define user_addr_max() \ #define user_addr_max() \
(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) (uaccess_kernel() ? ~0UL : TASK_SIZE)
long strncpy_from_user(char *dest, const char __user *src, long count); long strncpy_from_user(char *dest, const char __user *src, long count);
......
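The sparc user_addr_max() change mirrors the earlier user_addr_max() hunk in this section: same value, just expressed through uaccess_kernel(). The limit matters because the generic string helpers use it to clamp how far they will walk user memory; a rough sketch of the pattern in lib/strncpy_from_user.c, where do_strncpy_from_user() stands in for the internal word-at-a-time loop and is only named here for illustration:

long strncpy_from_user(char *dst, const char __user *src, long count)
{
	unsigned long max_addr = user_addr_max();
	unsigned long src_addr = (unsigned long)src;

	if (unlikely(src_addr >= max_addr))
		return -EFAULT;
	/* never let the copy run past the current address-space limit */
	return do_strncpy_from_user(dst, src, count, max_addr - src_addr);
}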
...@@ -7,14 +7,8 @@ ...@@ -7,14 +7,8 @@
#ifndef _ASM_UACCESS_H #ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H #define _ASM_UACCESS_H
#ifdef __KERNEL__
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/errno.h>
#endif
#ifndef __ASSEMBLY__
#include <asm/processor.h> #include <asm/processor.h>
...@@ -30,9 +24,6 @@ ...@@ -30,9 +24,6 @@
#define KERNEL_DS ((mm_segment_t) { 0 }) #define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { -1 }) #define USER_DS ((mm_segment_t) { -1 })
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.current_ds) #define get_fs() (current->thread.current_ds)
#define set_fs(val) ((current->thread.current_ds) = (val)) #define set_fs(val) ((current->thread.current_ds) = (val))
...@@ -45,7 +36,7 @@ ...@@ -45,7 +36,7 @@
* large size and address near to PAGE_OFFSET - a fault will break his intentions. * large size and address near to PAGE_OFFSET - a fault will break his intentions.
*/ */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (uaccess_kernel())
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(type, addr, size) \ #define access_ok(type, addr, size) \
({ (void)(type); __access_ok((unsigned long)(addr), size); }) ({ (void)(type); __access_ok((unsigned long)(addr), size); })
...@@ -80,8 +71,6 @@ struct exception_table_entry ...@@ -80,8 +71,6 @@ struct exception_table_entry
/* Returns 0 if exception not found and fixup otherwise. */ /* Returns 0 if exception not found and fixup otherwise. */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2); unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
void __ret_efault(void);
/* Uh, these should become the main single-value transfer routines.. /* Uh, these should become the main single-value transfer routines..
* They automatically use the right size if we just have the right * They automatically use the right size if we just have the right
* pointer type.. * pointer type..
...@@ -246,39 +235,18 @@ int __get_user_bad(void); ...@@ -246,39 +235,18 @@ int __get_user_bad(void);
unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size); unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (n && __access_ok((unsigned long) to, n)) {
check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
} else
return n;
}
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n); return __copy_user(to, (__force void __user *) from, n);
} }
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
if (n && __access_ok((unsigned long) from, n)) {
check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n); return __copy_user((__force void __user *) to, from, n);
} else {
memset(to, 0, n);
return n;
}
} }
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) #define INLINE_COPY_FROM_USER
{ #define INLINE_COPY_TO_USER
return __copy_user((__force void __user *) to, from, n);
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __clear_user(void __user *addr, unsigned long size) static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{ {
...@@ -312,6 +280,4 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n) ...@@ -312,6 +280,4 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
__must_check long strlen_user(const char __user *str); __must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n); __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_UACCESS_H */ #endif /* _ASM_UACCESS_H */
...@@ -5,18 +5,12 @@ ...@@ -5,18 +5,12 @@
* User space memory access functions * User space memory access functions
*/ */
#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/spitfire.h> #include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h> #include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h> #include <asm/extable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <asm/processor.h> #include <asm/processor.h>
...@@ -36,9 +30,6 @@ ...@@ -36,9 +30,6 @@
#define KERNEL_DS ((mm_segment_t) { ASI_P }) #define KERNEL_DS ((mm_segment_t) { ASI_P })
#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */ #define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
...@@ -185,39 +176,19 @@ __asm__ __volatile__( \ ...@@ -185,39 +176,19 @@ __asm__ __volatile__( \
int __get_user_bad(void); int __get_user_bad(void);
unsigned long __must_check ___copy_from_user(void *to, unsigned long __must_check raw_copy_from_user(void *to,
const void __user *from, const void __user *from,
unsigned long size); unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
check_object_size(to, size, false);
return ___copy_from_user(to, from, size); unsigned long __must_check raw_copy_to_user(void __user *to,
}
#define __copy_from_user copy_from_user
unsigned long __must_check ___copy_to_user(void __user *to,
const void *from, const void *from,
unsigned long size); unsigned long size);
static inline unsigned long __must_check #define INLINE_COPY_FROM_USER
copy_to_user(void __user *to, const void *from, unsigned long size) #define INLINE_COPY_TO_USER
{
check_object_size(from, size, true);
return ___copy_to_user(to, from, size); unsigned long __must_check raw_copy_in_user(void __user *to,
}
#define __copy_to_user copy_to_user
unsigned long __must_check ___copy_in_user(void __user *to,
const void __user *from, const void __user *from,
unsigned long size); unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user
unsigned long __must_check __clear_user(void __user *, unsigned long); unsigned long __must_check __clear_user(void __user *, unsigned long);
...@@ -226,14 +197,9 @@ unsigned long __must_check __clear_user(void __user *, unsigned long); ...@@ -226,14 +197,9 @@ unsigned long __must_check __clear_user(void __user *, unsigned long);
__must_check long strlen_user(const char __user *str); __must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n); __must_check long strnlen_user(const char __user *str, long n);
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
struct pt_regs; struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *, unsigned long compute_effective_address(struct pt_regs *,
unsigned int insn, unsigned int insn,
unsigned int rd); unsigned int rd);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_UACCESS_H */ #endif /* _ASM_UACCESS_H */
...@@ -809,10 +809,3 @@ lvl14_save: ...@@ -809,10 +809,3 @@ lvl14_save:
.word 0 .word 0
.word 0 .word 0
.word t_irq14 .word t_irq14
.section ".fixup",#alloc,#execinstr
.globl __ret_efault
__ret_efault:
ret
restore %g0, -EFAULT, %o0
EXPORT_SYMBOL(__ret_efault)
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
.type generic_patch_copyops,#function .type generic_patch_copyops,#function
generic_patch_copyops: generic_patch_copyops:
GEN_DO_PATCH(memcpy, GENmemcpy) GEN_DO_PATCH(memcpy, GENmemcpy)
GEN_DO_PATCH(___copy_from_user, GENcopy_from_user) GEN_DO_PATCH(raw_copy_from_user, GENcopy_from_user)
GEN_DO_PATCH(___copy_to_user, GENcopy_to_user) GEN_DO_PATCH(raw_copy_to_user, GENcopy_to_user)
retl retl
nop nop
.size generic_patch_copyops,.-generic_patch_copyops .size generic_patch_copyops,.-generic_patch_copyops
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
.type niagara2_patch_copyops,#function .type niagara2_patch_copyops,#function
niagara2_patch_copyops: niagara2_patch_copyops:
NG_DO_PATCH(memcpy, NG2memcpy) NG_DO_PATCH(memcpy, NG2memcpy)
NG_DO_PATCH(___copy_from_user, NG2copy_from_user) NG_DO_PATCH(raw_copy_from_user, NG2copy_from_user)
NG_DO_PATCH(___copy_to_user, NG2copy_to_user) NG_DO_PATCH(raw_copy_to_user, NG2copy_to_user)
retl retl
nop nop
.size niagara2_patch_copyops,.-niagara2_patch_copyops .size niagara2_patch_copyops,.-niagara2_patch_copyops
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
.type niagara4_patch_copyops,#function .type niagara4_patch_copyops,#function
niagara4_patch_copyops: niagara4_patch_copyops:
NG_DO_PATCH(memcpy, NG4memcpy) NG_DO_PATCH(memcpy, NG4memcpy)
NG_DO_PATCH(___copy_from_user, NG4copy_from_user) NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user)
NG_DO_PATCH(___copy_to_user, NG4copy_to_user) NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user)
retl retl
nop nop
.size niagara4_patch_copyops,.-niagara4_patch_copyops .size niagara4_patch_copyops,.-niagara4_patch_copyops
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop nop
#endif #endif
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
.type niagara_patch_copyops,#function .type niagara_patch_copyops,#function
niagara_patch_copyops: niagara_patch_copyops:
NG_DO_PATCH(memcpy, NGmemcpy) NG_DO_PATCH(memcpy, NGmemcpy)
NG_DO_PATCH(___copy_from_user, NGcopy_from_user) NG_DO_PATCH(raw_copy_from_user, NGcopy_from_user)
NG_DO_PATCH(___copy_to_user, NGcopy_to_user) NG_DO_PATCH(raw_copy_to_user, NGcopy_to_user)
retl retl
nop nop
.size niagara_patch_copyops,.-niagara_patch_copyops .size niagara_patch_copyops,.-niagara_patch_copyops
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
.text; \ .text; \
.align 4; .align 4;
#define FUNC_NAME ___copy_from_user #define FUNC_NAME raw_copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest #define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest #define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest
#define EX_RETVAL(x) 0 #define EX_RETVAL(x) 0
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop; \ nop; \
#include "U1memcpy.S" #include "U1memcpy.S"
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
.text; \ .text; \
.align 4; .align 4;
#define FUNC_NAME ___copy_to_user #define FUNC_NAME raw_copy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
#define EX_RETVAL(x) 0 #define EX_RETVAL(x) 0
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop; \ nop; \
#include "U1memcpy.S" #include "U1memcpy.S"
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#define PREAMBLE \ #define PREAMBLE \
rd %asi, %g1; \ rd %asi, %g1; \
cmp %g1, ASI_AIUS; \ cmp %g1, ASI_AIUS; \
bne,pn %icc, ___copy_in_user; \ bne,pn %icc, raw_copy_in_user; \
nop; \ nop; \
#include "U3memcpy.S" #include "U3memcpy.S"
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
.type cheetah_patch_copyops,#function .type cheetah_patch_copyops,#function
cheetah_patch_copyops: cheetah_patch_copyops:
ULTRA3_DO_PATCH(memcpy, U3memcpy) ULTRA3_DO_PATCH(memcpy, U3memcpy)
ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user) ULTRA3_DO_PATCH(raw_copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user) ULTRA3_DO_PATCH(raw_copy_to_user, U3copy_to_user)
retl retl
nop nop
.size cheetah_patch_copyops,.-cheetah_patch_copyops .size cheetah_patch_copyops,.-cheetah_patch_copyops
...@@ -44,7 +44,7 @@ __retl_o2_plus_1: ...@@ -44,7 +44,7 @@ __retl_o2_plus_1:
* to copy register windows around during thread cloning. * to copy register windows around during thread cloning.
*/ */
ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ ENTRY(raw_copy_in_user) /* %o0=dst, %o1=src, %o2=len */
cmp %o2, 0 cmp %o2, 0
be,pn %XCC, 85f be,pn %XCC, 85f
or %o0, %o1, %o3 or %o0, %o1, %o3
...@@ -105,5 +105,5 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ ...@@ -105,5 +105,5 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
add %o0, 1, %o0 add %o0, 1, %o0
retl retl
clr %o0 clr %o0
ENDPROC(___copy_in_user) ENDPROC(raw_copy_in_user)
EXPORT_SYMBOL(___copy_in_user) EXPORT_SYMBOL(raw_copy_in_user)
...@@ -364,21 +364,7 @@ short_aligned_end: ...@@ -364,21 +364,7 @@ short_aligned_end:
97: 97:
mov %o2, %g3 mov %o2, %g3
fixupretl: fixupretl:
sethi %hi(PAGE_OFFSET), %g1 retl
cmp %o0, %g1
blu 1f
cmp %o1, %g1
bgeu 1f
ld [%g6 + TI_PREEMPT], %g1
cmp %g1, 0
bne 1f
nop
save %sp, -64, %sp
mov %i0, %o0
call __bzero
mov %g3, %o1
restore
1: retl
mov %g3, %o0 mov %g3, %o0
/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */ /* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
......
...@@ -7,6 +7,7 @@ generic-y += clkdev.h ...@@ -7,6 +7,7 @@ generic-y += clkdev.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += hw_irq.h generic-y += hw_irq.h
......
...@@ -18,15 +18,11 @@ ...@@ -18,15 +18,11 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h> #include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h> #include <asm/page.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with * performed or not. If get_fs() == USER_DS, checking is performed, with
...@@ -102,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size); ...@@ -102,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size);
likely(__range_ok((unsigned long)(addr), (size)) == 0); \ likely(__range_ok((unsigned long)(addr), (size)) == 0); \
}) })
/* #include <asm/extable.h>
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
/* /*
* This is a type: either unsigned long, if the argument fits into * This is a type: either unsigned long, if the argument fits into
...@@ -334,145 +313,16 @@ extern int __put_user_bad(void) ...@@ -334,145 +313,16 @@ extern int __put_user_bad(void)
((x) = 0, -EFAULT); \ ((x) = 0, -EFAULT); \
}) })
/** extern unsigned long __must_check
* __copy_to_user() - copy data into user space, with less checking. raw_copy_to_user(void __user *to, const void *from, unsigned long n);
* @to: Destination address, in user space. extern unsigned long __must_check
* @from: Source address, in kernel space. raw_copy_from_user(void *to, const void __user *from, unsigned long n);
* @n: Number of bytes to copy. #define INLINE_COPY_FROM_USER
* #define INLINE_COPY_TO_USER
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* An alternate version - __copy_to_user_inatomic() - is designed
* to be called from atomic context, typically bracketed by calls
* to pagefault_disable() and pagefault_enable().
*/
extern unsigned long __must_check __copy_to_user_inatomic(
void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return __copy_to_user_inatomic(to, from, n);
}
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
}
/**
* __copy_from_user() - copy data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*
* An alternate version - __copy_from_user_inatomic() - is designed
* to be called from atomic context, typically bracketed by calls
* to pagefault_disable() and pagefault_enable(). This version
* does *NOT* pad with zeros.
*/
extern unsigned long __must_check __copy_from_user_inatomic(
void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
return __copy_from_user_zeroing(to, from, n);
}
static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else
memset(to, 0, n);
return n;
}
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
unsigned long n)
{
int sz = __compiletime_object_size(to);
if (likely(sz == -1 || sz >= n))
n = _copy_from_user(to, from, n);
else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
#ifdef __tilegx__ #ifdef __tilegx__
/** extern unsigned long raw_copy_in_user(
* __copy_in_user() - copy data within user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to user space. Caller must check
* the specified blocks with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
extern unsigned long __copy_in_user_inatomic(
void __user *to, const void __user *from, unsigned long n); void __user *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
return __copy_in_user_inatomic(to, from, n);
}
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
n = __copy_in_user(to, from, n);
return n;
}
#endif #endif
......
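With tile exporting raw_copy_{to,from}_user() (and raw_copy_in_user() on tilegx), the header only has to decide whether the generic wrappers are inlined. Defining INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER asks include/linux/uaccess.h to emit the _copy_* helpers as static inlines in each translation unit; leaving them undefined selects the out-of-line versions built in lib/usercopy.c. A simplified model of the to-user side of that switch (KASAN hooks omitted):

    #ifdef INLINE_COPY_TO_USER
    static inline __must_check unsigned long
    _copy_to_user(void __user *to, const void *from, unsigned long n)
    {
            might_fault();
            if (access_ok(VERIFY_WRITE, to, n))
                    n = raw_copy_to_user(to, from, n);
            return n;               /* bytes not copied */
    }
    #else
    extern __must_check unsigned long
    _copy_to_user(void __user *, const void *, unsigned long);
    #endif

Which way an architecture goes is a code-size versus call-overhead trade-off; the semantics are identical.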
...@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount); ...@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
/* arch/tile/lib/, various memcpy files */ /* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic); EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(__copy_from_user_inatomic); EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(__copy_from_user_zeroing);
#ifdef __tilegx__ #ifdef __tilegx__
EXPORT_SYMBOL(__copy_in_user_inatomic); EXPORT_SYMBOL(raw_copy_in_user);
#endif #endif
/* hypervisor glue */ /* hypervisor glue */
......
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#define IS_MEMCPY 0 #define IS_MEMCPY 0
#define IS_COPY_FROM_USER 1 #define IS_COPY_FROM_USER 1
#define IS_COPY_FROM_USER_ZEROING 2
#define IS_COPY_TO_USER -1 #define IS_COPY_TO_USER -1
.section .text.memcpy_common, "ax" .section .text.memcpy_common, "ax"
...@@ -42,40 +41,31 @@ ...@@ -42,40 +41,31 @@
9 9
/* __copy_from_user_inatomic takes the kernel target address in r0, /* raw_copy_from_user takes the kernel target address in r0,
* the user source in r1, and the bytes to copy in r2. * the user source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0. * It returns the number of uncopiable bytes (hopefully zero) in r0.
*/ */
ENTRY(__copy_from_user_inatomic) ENTRY(raw_copy_from_user)
.type __copy_from_user_inatomic, @function .type raw_copy_from_user, @function
FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \ FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
.text.memcpy_common, \ .text.memcpy_common, \
.Lend_memcpy_common - __copy_from_user_inatomic) .Lend_memcpy_common - raw_copy_from_user)
{ movei r29, IS_COPY_FROM_USER; j memcpy_common } { movei r29, IS_COPY_FROM_USER; j memcpy_common }
.size __copy_from_user_inatomic, . - __copy_from_user_inatomic .size raw_copy_from_user, . - raw_copy_from_user
/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but /* raw_copy_to_user takes the user target address in r0,
* any uncopiable bytes are zeroed in the target.
*/
ENTRY(__copy_from_user_zeroing)
.type __copy_from_user_zeroing, @function
FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
/* __copy_to_user_inatomic takes the user target address in r0,
* the kernel source in r1, and the bytes to copy in r2. * the kernel source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0. * It returns the number of uncopiable bytes (hopefully zero) in r0.
*/ */
ENTRY(__copy_to_user_inatomic) ENTRY(raw_copy_to_user)
.type __copy_to_user_inatomic, @function .type raw_copy_to_user, @function
FEEDBACK_REENTER(__copy_from_user_inatomic) FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_COPY_TO_USER; j memcpy_common } { movei r29, IS_COPY_TO_USER; j memcpy_common }
.size __copy_to_user_inatomic, . - __copy_to_user_inatomic .size raw_copy_to_user, . - raw_copy_to_user
ENTRY(memcpy) ENTRY(memcpy)
.type memcpy, @function .type memcpy, @function
FEEDBACK_REENTER(__copy_from_user_inatomic) FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_MEMCPY } { movei r29, IS_MEMCPY }
.size memcpy, . - memcpy .size memcpy, . - memcpy
/* Fall through */ /* Fall through */
...@@ -520,12 +510,7 @@ copy_from_user_fixup_loop: ...@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
{ bnzt r2, copy_from_user_fixup_loop } { bnzt r2, copy_from_user_fixup_loop }
.Lcopy_from_user_fixup_zero_remainder: .Lcopy_from_user_fixup_zero_remainder:
{ bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */ move lr, r27
/* byte-at-a-time loop faulted, so zero the rest. */
{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
{ bnzt r3, 1b }
2: move lr, r27
{ move r0, r2; jrp lr } { move r0, r2; jrp lr }
copy_to_user_fixup_loop: copy_to_user_fixup_loop:
......
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
__v; \ __v; \
}) })
#define USERCOPY_FUNC __copy_to_user_inatomic #define USERCOPY_FUNC raw_copy_to_user
#define ST1(p, v) _ST((p), st1, (v)) #define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v)) #define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v)) #define ST4(p, v) _ST((p), st4, (v))
...@@ -62,7 +62,7 @@ ...@@ -62,7 +62,7 @@
#define LD8 LD #define LD8 LD
#include "memcpy_64.c" #include "memcpy_64.c"
#define USERCOPY_FUNC __copy_from_user_inatomic #define USERCOPY_FUNC raw_copy_from_user
#define ST1 ST #define ST1 ST
#define ST2 ST #define ST2 ST
#define ST4 ST #define ST4 ST
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
#define LD8(p) _LD((p), ld) #define LD8(p) _LD((p), ld)
#include "memcpy_64.c" #include "memcpy_64.c"
#define USERCOPY_FUNC __copy_in_user_inatomic #define USERCOPY_FUNC raw_copy_in_user
#define ST1(p, v) _ST((p), st1, (v)) #define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v)) #define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v)) #define ST4(p, v) _ST((p), st4, (v))
...@@ -83,12 +83,3 @@ ...@@ -83,12 +83,3 @@
#define LD4(p) _LD((p), ld4u) #define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld) #define LD8(p) _LD((p), ld)
#include "memcpy_64.c" #include "memcpy_64.c"
unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
unsigned long n)
{
unsigned long rc = __copy_from_user_inatomic(to, from, n);
if (unlikely(rc))
memset(to + n - rc, 0, rc);
return rc;
}
...@@ -6,6 +6,7 @@ generic-y += delay.h ...@@ -6,6 +6,7 @@ generic-y += delay.h
generic-y += device.h generic-y += device.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += ftrace.h generic-y += ftrace.h
generic-y += futex.h generic-y += futex.h
generic-y += hardirq.h generic-y += hardirq.h
......
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
#ifndef __UM_UACCESS_H #ifndef __UM_UACCESS_H
#define __UM_UACCESS_H #define __UM_UACCESS_H
#include <asm/thread_info.h>
#include <asm/elf.h> #include <asm/elf.h>
#define __under_task_size(addr, size) \ #define __under_task_size(addr, size) \
...@@ -22,8 +21,8 @@ ...@@ -22,8 +21,8 @@
#define __addr_range_nowrap(addr, size) \ #define __addr_range_nowrap(addr, size) \
((unsigned long) (addr) <= ((unsigned long) (addr) + (size))) ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
extern long __copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern long __copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern long __strncpy_from_user(char *dst, const char __user *src, long count); extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strnlen_user(const void __user *str, long len); extern long __strnlen_user(const void __user *str, long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len); extern unsigned long __clear_user(void __user *mem, unsigned long len);
...@@ -32,12 +31,10 @@ static inline int __access_ok(unsigned long addr, unsigned long size); ...@@ -32,12 +31,10 @@ static inline int __access_ok(unsigned long addr, unsigned long size);
/* Teach asm-generic/uaccess.h that we have C functions for these. */ /* Teach asm-generic/uaccess.h that we have C functions for these. */
#define __access_ok __access_ok #define __access_ok __access_ok
#define __clear_user __clear_user #define __clear_user __clear_user
#define __copy_to_user __copy_to_user
#define __copy_from_user __copy_from_user
#define __strnlen_user __strnlen_user #define __strnlen_user __strnlen_user
#define __strncpy_from_user __strncpy_from_user #define __strncpy_from_user __strncpy_from_user
#define __copy_to_user_inatomic __copy_to_user #define INLINE_COPY_FROM_USER
#define __copy_from_user_inatomic __copy_from_user #define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h> #include <asm-generic/uaccess.h>
...@@ -46,7 +43,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size) ...@@ -46,7 +43,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
return __addr_range_nowrap(addr, size) && return __addr_range_nowrap(addr, size) &&
(__under_task_size(addr, size) || (__under_task_size(addr, size) ||
__access_ok_vsyscall(addr, size) || __access_ok_vsyscall(addr, size) ||
segment_eq(get_fs(), KERNEL_DS)); uaccess_kernel());
} }
#endif #endif
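uaccess_kernel(), used in the replacement lines here and below, is the new common spelling of the old open-coded segment check. As introduced by this series it is essentially:

    /* From include/linux/uaccess.h, give or take an #ifndef guard: */
    #define uaccess_kernel()        segment_eq(get_fs(), KERNEL_DS)

so the behaviour is unchanged, but there is now a single self-describing definition instead of many open-coded comparisons.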
...@@ -139,16 +139,16 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg) ...@@ -139,16 +139,16 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg)
return 0; return 0;
} }
long __copy_from_user(void *to, const void __user *from, unsigned long n) unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
if (segment_eq(get_fs(), KERNEL_DS)) { if (uaccess_kernel()) {
memcpy(to, (__force void*)from, n); memcpy(to, (__force void*)from, n);
return 0; return 0;
} }
return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to); return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
} }
EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(raw_copy_from_user);
static int copy_chunk_to_user(unsigned long to, int len, void *arg) static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{ {
...@@ -159,16 +159,16 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg) ...@@ -159,16 +159,16 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg)
return 0; return 0;
} }
long __copy_to_user(void __user *to, const void *from, unsigned long n) unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
if (segment_eq(get_fs(), KERNEL_DS)) { if (uaccess_kernel()) {
memcpy((__force void *) to, from, n); memcpy((__force void *) to, from, n);
return 0; return 0;
} }
return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from); return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
} }
EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(raw_copy_to_user);
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg) static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{ {
...@@ -189,7 +189,7 @@ long __strncpy_from_user(char *dst, const char __user *src, long count) ...@@ -189,7 +189,7 @@ long __strncpy_from_user(char *dst, const char __user *src, long count)
long n; long n;
char *ptr = dst; char *ptr = dst;
if (segment_eq(get_fs(), KERNEL_DS)) { if (uaccess_kernel()) {
strncpy(dst, (__force void *) src, count); strncpy(dst, (__force void *) src, count);
return strnlen(dst, count); return strnlen(dst, count);
} }
...@@ -210,7 +210,7 @@ static int clear_chunk(unsigned long addr, int len, void *unused) ...@@ -210,7 +210,7 @@ static int clear_chunk(unsigned long addr, int len, void *unused)
unsigned long __clear_user(void __user *mem, unsigned long len) unsigned long __clear_user(void __user *mem, unsigned long len)
{ {
if (segment_eq(get_fs(), KERNEL_DS)) { if (uaccess_kernel()) {
memset((__force void*)mem, 0, len); memset((__force void*)mem, 0, len);
return 0; return 0;
} }
...@@ -235,7 +235,7 @@ long __strnlen_user(const void __user *str, long len) ...@@ -235,7 +235,7 @@ long __strnlen_user(const void __user *str, long len)
{ {
int count = 0, n; int count = 0, n;
if (segment_eq(get_fs(), KERNEL_DS)) if (uaccess_kernel())
return strnlen((__force char*)str, len) + 1; return strnlen((__force char*)str, len) + 1;
n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
......
...@@ -10,6 +10,7 @@ generic-y += div64.h ...@@ -10,6 +10,7 @@ generic-y += div64.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fb.h generic-y += fb.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += ftrace.h generic-y += ftrace.h
......
...@@ -12,35 +12,30 @@ ...@@ -12,35 +12,30 @@
#ifndef __UNICORE_UACCESS_H__ #ifndef __UNICORE_UACCESS_H__
#define __UNICORE_UACCESS_H__ #define __UNICORE_UACCESS_H__
#include <linux/thread_info.h>
#include <linux/errno.h>
#include <asm/memory.h> #include <asm/memory.h>
#define __copy_from_user __copy_from_user
#define __copy_to_user __copy_to_user
#define __strncpy_from_user __strncpy_from_user #define __strncpy_from_user __strncpy_from_user
#define __strnlen_user __strnlen_user #define __strnlen_user __strnlen_user
#define __clear_user __clear_user #define __clear_user __clear_user
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) (((size) <= TASK_SIZE) \ #define __user_ok(addr, size) (((size) <= TASK_SIZE) \
&& ((addr) <= TASK_SIZE - (size))) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
extern unsigned long __must_check extern unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n); raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check extern unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n); raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check extern unsigned long __must_check
__clear_user(void __user *addr, unsigned long n); __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check extern unsigned long __must_check
__strncpy_from_user(char *to, const char __user *from, unsigned long count); __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long extern unsigned long
__strnlen_user(const char __user *s, long n); __strnlen_user(const char __user *s, long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h> #include <asm-generic/uaccess.h>
extern int fixup_exception(struct pt_regs *regs);
#endif /* __UNICORE_UACCESS_H__ */ #endif /* __UNICORE_UACCESS_H__ */
...@@ -46,8 +46,8 @@ EXPORT_SYMBOL(__strncpy_from_user); ...@@ -46,8 +46,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_from_user); EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(__copy_to_user); EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashldi3);
......
...@@ -178,7 +178,7 @@ void __show_regs(struct pt_regs *regs) ...@@ -178,7 +178,7 @@ void __show_regs(struct pt_regs *regs)
buf, interrupts_enabled(regs) ? "n" : "ff", buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)], processor_modes[processor_mode(regs)],
segment_eq(get_fs(), get_ds()) ? "kernel" : "user"); uaccess_kernel() ? "kernel" : "user");
{ {
unsigned int ctrl; unsigned int ctrl;
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
/* /*
* Prototype: * Prototype:
* *
* size_t __copy_from_user(void *to, const void *from, size_t n) * size_t raw_copy_from_user(void *to, const void *from, size_t n)
* *
* Purpose: * Purpose:
* *
...@@ -87,22 +87,18 @@ ...@@ -87,22 +87,18 @@
.text .text
ENTRY(__copy_from_user) ENTRY(raw_copy_from_user)
#include "copy_template.S" #include "copy_template.S"
ENDPROC(__copy_from_user) ENDPROC(raw_copy_from_user)
.pushsection .fixup,"ax" .pushsection .fixup,"ax"
.align 0 .align 0
copy_abort_preamble copy_abort_preamble
ldm.w (r1, r2), [sp]+ ldm.w (r1, r2, r3), [sp]+
sub r3, r0, r1 sub r0, r0, r1
rsub r2, r3, r2 rsub r0, r0, r2
stw r2, [sp]
mov r1, #0
b.l memset
ldw.w r0, [sp]+, #4
copy_abort_end copy_abort_end
.popsection .popsection
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
/* /*
* Prototype: * Prototype:
* *
* size_t __copy_to_user(void *to, const void *from, size_t n) * size_t raw_copy_to_user(void *to, const void *from, size_t n)
* *
* Purpose: * Purpose:
* *
...@@ -79,11 +79,11 @@ ...@@ -79,11 +79,11 @@
.text .text
WEAK(__copy_to_user) WEAK(raw_copy_to_user)
#include "copy_template.S" #include "copy_template.S"
ENDPROC(__copy_to_user) ENDPROC(raw_copy_to_user)
.pushsection .fixup,"ax" .pushsection .fixup,"ax"
.align 0 .align 0
......
...@@ -98,7 +98,6 @@ config X86 ...@@ -98,7 +98,6 @@ config X86
select HAVE_ACPI_APEI_NMI if ACPI select HAVE_ACPI_APEI_NMI if ACPI
select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
......
...@@ -3,19 +3,14 @@ ...@@ -3,19 +3,14 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/errno.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/kasan-checks.h> #include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/smap.h> #include <asm/smap.h>
#include <asm/extable.h> #include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/* /*
* The fs value determines whether argument validity checking should be * The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with * performed or not. If get_fs() == USER_DS, checking is performed, with
...@@ -384,6 +379,18 @@ do { \ ...@@ -384,6 +379,18 @@ do { \
: "=r" (err), ltype(x) \ : "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err)) : "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
/* /*
* This doesn't do __uaccess_begin/end - the exception handling * This doesn't do __uaccess_begin/end - the exception handling
* around it must do that. * around it must do that.
...@@ -675,59 +682,6 @@ extern struct movsl_mask { ...@@ -675,59 +682,6 @@ extern struct movsl_mask {
# include <asm/uaccess_64.h> # include <asm/uaccess_64.h>
#endif #endif
unsigned long __must_check _copy_from_user(void *to, const void __user *from,
unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
unsigned n);
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
might_fault();
kasan_check_write(to, n);
if (likely(sz < 0 || sz >= n)) {
check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
kasan_check_read(from, n);
might_fault();
if (likely(sz < 0 || sz >= n)) {
check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
/* /*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the * We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2. * nested NMI paths are careful to preserve CR2.
......
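__get_user_asm_nozero(), added above, matches the long-standing __get_user_asm() except that its fixup path does not clear the destination register: the raw copy primitives may leave uncopied bytes untouched because the generic _copy_from_user() zero-pads afterwards. For comparison, the pre-existing zeroing variant looks roughly like this (only the extra xor in the fixup differs):

    #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
            asm volatile("\n"                                               \
                         "1:        mov"itype" %2,%"rtype"1\n"              \
                         "2:\n"                                             \
                         ".section .fixup,\"ax\"\n"                         \
                         "3:        mov %3,%0\n"                            \
                         "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                         "  jmp 2b\n"                                       \
                         ".previous\n"                                      \
                         _ASM_EXTABLE(1b, 3b)                               \
                         : "=r" (err), ltype(x)                             \
                         : "m" (__m(addr)), "i" (errret), "0" (err))

With zero-padding centralized, clearing the register on the fault path of the raw primitives would be redundant work.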
...@@ -4,149 +4,52 @@ ...@@ -4,149 +4,52 @@
/* /*
* User space memory access functions * User space memory access functions
*/ */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/page.h> #include <asm/page.h>
unsigned long __must_check __copy_to_user_ll unsigned long __must_check __copy_user_ll
(void __user *to, const void *from, unsigned long n); (void *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero unsigned long __must_check __copy_from_user_ll_nocache_nozero
(void *to, const void __user *from, unsigned long n); (void *to, const void __user *from, unsigned long n);
/**
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
* The caller should also make sure he pins the user space address
* so that we don't result in page fault and sleep.
*/
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
check_object_size(from, n, true);
return __copy_to_user_ll(to, from, n);
}
/**
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
static __always_inline unsigned long __must_check static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
might_fault(); return __copy_user_ll((__force void *)to, from, n);
return __copy_to_user_inatomic(to, from, n);
} }
static __always_inline unsigned long static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __copy_from_user_ll_nozero(to, from, n);
}
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*
* An alternate version - __copy_from_user_inatomic() - may be called from
* atomic context and will fail rather than sleep. In this case the
* uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
* for explanation of why this is needed.
*/
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
check_object_size(to, n, false);
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
__uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
__uaccess_end();
return ret;
case 2:
__uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
__uaccess_end();
return ret;
case 4:
__uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
__uaccess_end();
return ret;
}
}
return __copy_from_user_ll(to, from, n);
}
static __always_inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{ {
might_fault();
if (__builtin_constant_p(n)) { if (__builtin_constant_p(n)) {
unsigned long ret; unsigned long ret;
switch (n) { switch (n) {
case 1: case 1:
ret = 0;
__uaccess_begin(); __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1); __get_user_asm_nozero(*(u8 *)to, from, ret,
"b", "b", "=q", 1);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 2: case 2:
ret = 0;
__uaccess_begin(); __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2); __get_user_asm_nozero(*(u16 *)to, from, ret,
"w", "w", "=r", 2);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 4: case 4:
ret = 0;
__uaccess_begin(); __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4); __get_user_asm_nozero(*(u32 *)to, from, ret,
"l", "k", "=r", 4);
__uaccess_end(); __uaccess_end();
return ret; return ret;
} }
} }
return __copy_from_user_ll_nocache(to, from, n); return __copy_user_ll(to, (__force const void *)from, n);
} }
static __always_inline unsigned long static __always_inline unsigned long
......
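Nothing changes for callers of copy_from_user()/copy_to_user() on either x86 flavour: they still get back the number of bytes that could not be copied, and on a short copy_from_user() the uncopied part of the destination is still zeroed, now by the generic layer. A hypothetical caller, with made-up names purely for illustration:

    /* demo_cfg / demo_dev / demo_set_cfg are illustrative, not kernel APIs */
    struct demo_cfg { u32 flags; u32 timeout_ms; };
    struct demo_dev { struct demo_cfg cfg; };

    static long demo_set_cfg(struct demo_dev *dev, void __user *arg)
    {
            struct demo_cfg cfg;

            if (copy_from_user(&cfg, arg, sizeof(cfg)))
                    return -EFAULT; /* partial copy; uncopied part of cfg is zeroed */
            dev->cfg = cfg;
            return 0;
    }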
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
* User space memory access functions * User space memory access functions
*/ */
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h> #include <linux/lockdep.h>
#include <linux/kasan-checks.h> #include <linux/kasan-checks.h>
#include <asm/alternative.h> #include <asm/alternative.h>
...@@ -46,58 +45,54 @@ copy_user_generic(void *to, const void *from, unsigned len) ...@@ -46,58 +45,54 @@ copy_user_generic(void *to, const void *from, unsigned len)
return ret; return ret;
} }
__must_check unsigned long static __always_inline __must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len); raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{ {
int ret = 0; int ret = 0;
check_object_size(dst, size, false);
if (!__builtin_constant_p(size)) if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size); return copy_user_generic(dst, (__force void *)src, size);
switch (size) { switch (size) {
case 1: case 1:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u8 *)dst, (u8 __user *)src, __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1); ret, "b", "b", "=q", 1);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 2: case 2:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u16 *)dst, (u16 __user *)src, __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2); ret, "w", "w", "=r", 2);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 4: case 4:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u32 *)dst, (u32 __user *)src, __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4); ret, "l", "k", "=r", 4);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 8: case 8:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src, __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8); ret, "q", "", "=r", 8);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 10: case 10:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src, __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10); ret, "q", "", "=r", 10);
if (likely(!ret)) if (likely(!ret))
__get_user_asm(*(u16 *)(8 + (char *)dst), __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
(u16 __user *)(8 + (char __user *)src), (u16 __user *)(8 + (char __user *)src),
ret, "w", "w", "=r", 2); ret, "w", "w", "=r", 2);
__uaccess_end(); __uaccess_end();
return ret; return ret;
case 16: case 16:
__uaccess_begin(); __uaccess_begin();
__get_user_asm(*(u64 *)dst, (u64 __user *)src, __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16); ret, "q", "", "=r", 16);
if (likely(!ret)) if (likely(!ret))
__get_user_asm(*(u64 *)(8 + (char *)dst), __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
(u64 __user *)(8 + (char __user *)src), (u64 __user *)(8 + (char __user *)src),
ret, "q", "", "=r", 8); ret, "q", "", "=r", 8);
__uaccess_end(); __uaccess_end();
...@@ -107,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) ...@@ -107,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
} }
} }
static __always_inline __must_check static __always_inline __must_check unsigned long
int __copy_from_user(void *dst, const void __user *src, unsigned size) raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
might_fault();
kasan_check_write(dst, size);
return __copy_from_user_nocheck(dst, src, size);
}
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{ {
int ret = 0; int ret = 0;
check_object_size(src, size, true);
if (!__builtin_constant_p(size)) if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size); return copy_user_generic((__force void *)dst, src, size);
switch (size) { switch (size) {
...@@ -176,99 +162,15 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) ...@@ -176,99 +162,15 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
} }
static __always_inline __must_check static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size) unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
might_fault();
kasan_check_read(src, size);
return __copy_to_user_nocheck(dst, src, size);
}
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{ {
int ret = 0;
might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst,
(__force void *)src, size);
switch (size) {
case 1: {
u8 tmp;
__uaccess_begin();
__get_user_asm(tmp, (u8 __user *)src,
ret, "b", "b", "=q", 1);
if (likely(!ret))
__put_user_asm(tmp, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
__uaccess_end();
return ret;
}
case 2: {
u16 tmp;
__uaccess_begin();
__get_user_asm(tmp, (u16 __user *)src,
ret, "w", "w", "=r", 2);
if (likely(!ret))
__put_user_asm(tmp, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
__uaccess_end();
return ret;
}
case 4: {
u32 tmp;
__uaccess_begin();
__get_user_asm(tmp, (u32 __user *)src,
ret, "l", "k", "=r", 4);
if (likely(!ret))
__put_user_asm(tmp, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
__uaccess_end();
return ret;
}
case 8: {
u64 tmp;
__uaccess_begin();
__get_user_asm(tmp, (u64 __user *)src,
ret, "q", "", "=r", 8);
if (likely(!ret))
__put_user_asm(tmp, (u64 __user *)dst,
ret, "q", "", "er", 8);
__uaccess_end();
return ret;
}
default:
return copy_user_generic((__force void *)dst, return copy_user_generic((__force void *)dst,
(__force void *)src, size); (__force void *)src, size);
}
}
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
kasan_check_write(dst, size);
return __copy_from_user_nocheck(dst, src, size);
}
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
kasan_check_read(src, size);
return __copy_to_user_nocheck(dst, src, size);
} }
extern long __copy_user_nocache(void *dst, const void __user *src, extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest); unsigned size, int zerorest);
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
might_fault();
kasan_check_write(dst, size);
return __copy_user_nocache(dst, src, size, 1);
}
static inline int static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src, __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size) unsigned size)
......
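copy_in_user() disappears from the x86-64 header because the arch now only supplies raw_copy_in_user(), a plain copy_user_generic() call; the access_ok() checks on both pointers live in the generic wrapper, whose rough shape is:

    static __always_inline unsigned long __must_check
    copy_in_user(void __user *to, const void __user *from, unsigned long n)
    {
            might_fault();
            if (access_ok(VERIFY_WRITE, to, n) &&
                access_ok(VERIFY_READ, from, n))
                    n = raw_copy_in_user(to, from, n);
            return n;
    }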
...@@ -4,12 +4,9 @@ ...@@ -4,12 +4,9 @@
* For licencing details see kernel-base/COPYING * For licencing details see kernel-base/COPYING
*/ */
#include <linux/highmem.h> #include <linux/uaccess.h>
#include <linux/export.h> #include <linux/export.h>
#include <asm/word-at-a-time.h>
#include <linux/sched.h>
/* /*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the * We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2. * nested NMI paths are careful to preserve CR2.
...@@ -34,52 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) ...@@ -34,52 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(copy_from_user_nmi); EXPORT_SYMBOL_GPL(copy_from_user_nmi);
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
}
EXPORT_SYMBOL(_copy_to_user);
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else
memset(to, 0, n);
return n;
}
EXPORT_SYMBOL(_copy_from_user);
...@@ -5,12 +5,7 @@ ...@@ -5,12 +5,7 @@
* Copyright 1997 Andi Kleen <ak@muc.de> * Copyright 1997 Andi Kleen <ak@muc.de>
* Copyright 1997 Linus Torvalds * Copyright 1997 Linus Torvalds
*/ */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/mmx.h> #include <asm/mmx.h>
#include <asm/asm.h> #include <asm/asm.h>
...@@ -201,197 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) ...@@ -201,197 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
return size; return size;
} }
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
int d0, d1;
__asm__ __volatile__(
" .align 2,0x90\n"
"0: movl 32(%4), %%eax\n"
" cmpl $67, %0\n"
" jbe 2f\n"
"1: movl 64(%4), %%eax\n"
" .align 2,0x90\n"
"2: movl 0(%4), %%eax\n"
"21: movl 4(%4), %%edx\n"
" movl %%eax, 0(%3)\n"
" movl %%edx, 4(%3)\n"
"3: movl 8(%4), %%eax\n"
"31: movl 12(%4),%%edx\n"
" movl %%eax, 8(%3)\n"
" movl %%edx, 12(%3)\n"
"4: movl 16(%4), %%eax\n"
"41: movl 20(%4), %%edx\n"
" movl %%eax, 16(%3)\n"
" movl %%edx, 20(%3)\n"
"10: movl 24(%4), %%eax\n"
"51: movl 28(%4), %%edx\n"
" movl %%eax, 24(%3)\n"
" movl %%edx, 28(%3)\n"
"11: movl 32(%4), %%eax\n"
"61: movl 36(%4), %%edx\n"
" movl %%eax, 32(%3)\n"
" movl %%edx, 36(%3)\n"
"12: movl 40(%4), %%eax\n"
"71: movl 44(%4), %%edx\n"
" movl %%eax, 40(%3)\n"
" movl %%edx, 44(%3)\n"
"13: movl 48(%4), %%eax\n"
"81: movl 52(%4), %%edx\n"
" movl %%eax, 48(%3)\n"
" movl %%edx, 52(%3)\n"
"14: movl 56(%4), %%eax\n"
"91: movl 60(%4), %%edx\n"
" movl %%eax, 56(%3)\n"
" movl %%edx, 60(%3)\n"
" addl $-64, %0\n"
" addl $64, %4\n"
" addl $64, %3\n"
" cmpl $63, %0\n"
" ja 0b\n"
"5: movl %0, %%eax\n"
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
"6: rep; movsl\n"
" movl %%eax,%0\n"
"7: rep; movsb\n"
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
"16: pushl %0\n"
" pushl %%eax\n"
" xorl %%eax,%%eax\n"
" rep; stosb\n"
" popl %%eax\n"
" popl %0\n"
" jmp 8b\n"
".previous\n"
_ASM_EXTABLE(0b,16b)
_ASM_EXTABLE(1b,16b)
_ASM_EXTABLE(2b,16b)
_ASM_EXTABLE(21b,16b)
_ASM_EXTABLE(3b,16b)
_ASM_EXTABLE(31b,16b)
_ASM_EXTABLE(4b,16b)
_ASM_EXTABLE(41b,16b)
_ASM_EXTABLE(10b,16b)
_ASM_EXTABLE(51b,16b)
_ASM_EXTABLE(11b,16b)
_ASM_EXTABLE(61b,16b)
_ASM_EXTABLE(12b,16b)
_ASM_EXTABLE(71b,16b)
_ASM_EXTABLE(13b,16b)
_ASM_EXTABLE(81b,16b)
_ASM_EXTABLE(14b,16b)
_ASM_EXTABLE(91b,16b)
_ASM_EXTABLE(6b,9b)
_ASM_EXTABLE(7b,16b)
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
return size;
}
/*
* Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
* hyoshiok@miraclelinux.com
*/
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
__asm__ __volatile__(
" .align 2,0x90\n"
"0: movl 32(%4), %%eax\n"
" cmpl $67, %0\n"
" jbe 2f\n"
"1: movl 64(%4), %%eax\n"
" .align 2,0x90\n"
"2: movl 0(%4), %%eax\n"
"21: movl 4(%4), %%edx\n"
" movnti %%eax, 0(%3)\n"
" movnti %%edx, 4(%3)\n"
"3: movl 8(%4), %%eax\n"
"31: movl 12(%4),%%edx\n"
" movnti %%eax, 8(%3)\n"
" movnti %%edx, 12(%3)\n"
"4: movl 16(%4), %%eax\n"
"41: movl 20(%4), %%edx\n"
" movnti %%eax, 16(%3)\n"
" movnti %%edx, 20(%3)\n"
"10: movl 24(%4), %%eax\n"
"51: movl 28(%4), %%edx\n"
" movnti %%eax, 24(%3)\n"
" movnti %%edx, 28(%3)\n"
"11: movl 32(%4), %%eax\n"
"61: movl 36(%4), %%edx\n"
" movnti %%eax, 32(%3)\n"
" movnti %%edx, 36(%3)\n"
"12: movl 40(%4), %%eax\n"
"71: movl 44(%4), %%edx\n"
" movnti %%eax, 40(%3)\n"
" movnti %%edx, 44(%3)\n"
"13: movl 48(%4), %%eax\n"
"81: movl 52(%4), %%edx\n"
" movnti %%eax, 48(%3)\n"
" movnti %%edx, 52(%3)\n"
"14: movl 56(%4), %%eax\n"
"91: movl 60(%4), %%edx\n"
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
" addl $64, %4\n"
" addl $64, %3\n"
" cmpl $63, %0\n"
" ja 0b\n"
" sfence \n"
"5: movl %0, %%eax\n"
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
"6: rep; movsl\n"
" movl %%eax,%0\n"
"7: rep; movsb\n"
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
"16: pushl %0\n"
" pushl %%eax\n"
" xorl %%eax,%%eax\n"
" rep; stosb\n"
" popl %%eax\n"
" popl %0\n"
" jmp 8b\n"
".previous\n"
_ASM_EXTABLE(0b,16b)
_ASM_EXTABLE(1b,16b)
_ASM_EXTABLE(2b,16b)
_ASM_EXTABLE(21b,16b)
_ASM_EXTABLE(3b,16b)
_ASM_EXTABLE(31b,16b)
_ASM_EXTABLE(4b,16b)
_ASM_EXTABLE(41b,16b)
_ASM_EXTABLE(10b,16b)
_ASM_EXTABLE(51b,16b)
_ASM_EXTABLE(11b,16b)
_ASM_EXTABLE(61b,16b)
_ASM_EXTABLE(12b,16b)
_ASM_EXTABLE(71b,16b)
_ASM_EXTABLE(13b,16b)
_ASM_EXTABLE(81b,16b)
_ASM_EXTABLE(14b,16b)
_ASM_EXTABLE(91b,16b)
_ASM_EXTABLE(6b,9b)
_ASM_EXTABLE(7b,16b)
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
return size;
}
static unsigned long __copy_user_intel_nocache(void *to, static unsigned long __copy_user_intel_nocache(void *to,
const void __user *from, unsigned long size) const void __user *from, unsigned long size)
{ {
...@@ -486,12 +290,8 @@ static unsigned long __copy_user_intel_nocache(void *to, ...@@ -486,12 +290,8 @@ static unsigned long __copy_user_intel_nocache(void *to,
* Leave these declared but undefined. They should not be any references to * Leave these declared but undefined. They should not be any references to
* them * them
*/ */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from, unsigned long __copy_user_intel(void __user *to, const void *from,
unsigned long size); unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */ #endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */ /* Generic arbitrary sized copy. */
...@@ -528,47 +328,7 @@ do { \ ...@@ -528,47 +328,7 @@ do { \
: "memory"); \ : "memory"); \
} while (0) } while (0)
#define __copy_user_zeroing(to, from, size) \ unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
do { \
int __d0, __d1, __d2; \
__asm__ __volatile__( \
" cmp $7,%0\n" \
" jbe 1f\n" \
" movl %1,%0\n" \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
"4: rep; movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 6f\n" \
"3: lea 0(%3,%0,4),%0\n" \
"6: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(4b,5b) \
_ASM_EXTABLE(0b,3b) \
_ASM_EXTABLE(1b,6b) \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \
} while (0)
unsigned long __copy_to_user_ll(void __user *to, const void *from,
unsigned long n)
{ {
stac(); stac();
if (movsl_is_ok(to, from, n)) if (movsl_is_ok(to, from, n))
...@@ -578,51 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, ...@@ -578,51 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
clac(); clac();
return n; return n;
} }
EXPORT_SYMBOL(__copy_to_user_ll); EXPORT_SYMBOL(__copy_user_ll);
unsigned long __copy_from_user_ll(void *to, const void __user *from,
unsigned long n)
{
stac();
if (movsl_is_ok(to, from, n))
__copy_user_zeroing(to, from, n);
else
n = __copy_user_zeroing_intel(to, from, n);
clac();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);
unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
unsigned long n)
{
stac();
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_intel((void __user *)to,
(const void *)from, n);
clac();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
unsigned long n)
{
stac();
#ifdef CONFIG_X86_INTEL_USERCOPY
if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
n = __copy_user_zeroing_intel_nocache(to, from, n);
else
__copy_user_zeroing(to, from, n);
#else
__copy_user_zeroing(to, from, n);
#endif
clac();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
unsigned long n) unsigned long n)
......
...@@ -54,15 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n) ...@@ -54,15 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
} }
EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(clear_user);
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
return copy_user_generic((__force void *)to, (__force void *)from, len);
}
return len;
}
EXPORT_SYMBOL(copy_in_user);
/* /*
* Try to copy last bytes and clear the rest if needed. * Try to copy last bytes and clear the rest if needed.
* Since protection fault in copy_from/to_user is not a normal situation, * Since protection fault in copy_from/to_user is not a normal situation,
...@@ -80,9 +71,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len) ...@@ -80,9 +71,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
break; break;
} }
clac(); clac();
/* If the destination is a kernel buffer, we always clear the end */
if (!__addr_ok(to))
memset(to, 0, len);
return len; return len;
} }
...@@ -6,6 +6,7 @@ generic-y += dma-contiguous.h ...@@ -6,6 +6,7 @@ generic-y += dma-contiguous.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += errno.h generic-y += errno.h
generic-y += exec.h generic-y += exec.h
generic-y += extable.h
generic-y += fcntl.h generic-y += fcntl.h
generic-y += hardirq.h generic-y += hardirq.h
generic-y += ioctl.h generic-y += ioctl.h
......
...@@ -19,9 +19,6 @@ ...@@ -19,9 +19,6 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <asm/types.h> #include <asm/types.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor.h> #include <asm/processor.h>
......
...@@ -16,14 +16,9 @@ ...@@ -16,14 +16,9 @@
#ifndef _XTENSA_UACCESS_H #ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H #define _XTENSA_UACCESS_H
#include <linux/errno.h>
#include <linux/prefetch.h> #include <linux/prefetch.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/extable.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#include <linux/sched.h>
/* /*
* The fs value determines whether argument validity checking should * The fs value determines whether argument validity checking should
...@@ -43,7 +38,7 @@ ...@@ -43,7 +38,7 @@
#define segment_eq(a, b) ((a).seg == (b).seg) #define segment_eq(a, b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \ #define __user_ok(addr, size) \
(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
...@@ -239,60 +234,22 @@ __asm__ __volatile__( \ ...@@ -239,60 +234,22 @@ __asm__ __volatile__( \
* Copy to/from user space * Copy to/from user space
*/ */
/*
* We use a generic, arbitrary-sized copy subroutine. The Xtensa
* architecture would cause heavy code bloat if we tried to inline
* these functions and provide __constant_copy_* equivalents like the
* i386 versions. __xtensa_copy_user is quite efficient. See the
* .fixup section of __xtensa_copy_user for a discussion on the
* X_zeroing equivalents for Xtensa.
*/
extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
static inline unsigned long static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{ {
return __copy_user(to, from, n); prefetchw(to);
} return __xtensa_copy_user(to, (__force const void *)from, n);
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
return __copy_user(to, from, n);
} }
static inline unsigned long static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n) raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{ {
prefetch(from); prefetch(from);
if (access_ok(VERIFY_WRITE, to, n)) return __xtensa_copy_user((__force void *)to, from, n);
return __copy_user(to, from, n);
return n;
}
static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
return __copy_user(to, from, n);
else
memset(to, 0, n);
return n;
} }
#define INLINE_COPY_FROM_USER
#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) #define INLINE_COPY_TO_USER
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
#define __copy_to_user(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/* /*
* We need to return the number of bytes not cleared. Our memset() * We need to return the number of bytes not cleared. Our memset()
...@@ -348,10 +305,4 @@ static inline long strnlen_user(const char *str, long len) ...@@ -348,10 +305,4 @@ static inline long strnlen_user(const char *str, long len)
return __strnlen_user(str, len); return __strnlen_user(str, len);
} }
struct exception_table_entry
{
unsigned long insn, fixup;
};
#endif /* _XTENSA_UACCESS_H */ #endif /* _XTENSA_UACCESS_H */
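The new xtensa wrappers above keep the prefetch hints but drop both the access_ok() checks and the memset() on the failure path, since the generic layer now handles both. The remaining detail is the __force cast, which tells sparse that discarding the __user address-space annotation is deliberate, because __xtensa_copy_user() takes plain pointers. Restated with comments (the same code as above, shown only to annotate the idiom):

    static inline unsigned long
    raw_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            prefetchw(to);          /* destination is about to be written */
            /* __force: intentional drop of the __user annotation for sparse */
            return __xtensa_copy_user(to, (__force const void *)from, n);
    }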
...@@ -102,9 +102,9 @@ __xtensa_copy_user: ...@@ -102,9 +102,9 @@ __xtensa_copy_user:
bltui a4, 7, .Lbytecopy # do short copies byte by byte bltui a4, 7, .Lbytecopy # do short copies byte by byte
# copy 1 byte # copy 1 byte
EX(l8ui, a6, a3, 0, l_fixup) EX(l8ui, a6, a3, 0, fixup)
addi a3, a3, 1 addi a3, a3, 1
EX(s8i, a6, a5, 0, s_fixup) EX(s8i, a6, a5, 0, fixup)
addi a5, a5, 1 addi a5, a5, 1
addi a4, a4, -1 addi a4, a4, -1
bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
...@@ -112,11 +112,11 @@ __xtensa_copy_user: ...@@ -112,11 +112,11 @@ __xtensa_copy_user:
.Ldst2mod4: # dst 16-bit aligned .Ldst2mod4: # dst 16-bit aligned
# copy 2 bytes # copy 2 bytes
bltui a4, 6, .Lbytecopy # do short copies byte by byte bltui a4, 6, .Lbytecopy # do short copies byte by byte
EX(l8ui, a6, a3, 0, l_fixup) EX(l8ui, a6, a3, 0, fixup)
EX(l8ui, a7, a3, 1, l_fixup) EX(l8ui, a7, a3, 1, fixup)
addi a3, a3, 2 addi a3, a3, 2
EX(s8i, a6, a5, 0, s_fixup) EX(s8i, a6, a5, 0, fixup)
EX(s8i, a7, a5, 1, s_fixup) EX(s8i, a7, a5, 1, fixup)
addi a5, a5, 2 addi a5, a5, 2
addi a4, a4, -2 addi a4, a4, -2
j .Ldstaligned # dst is now aligned, return to main algorithm j .Ldstaligned # dst is now aligned, return to main algorithm
...@@ -135,9 +135,9 @@ __xtensa_copy_user: ...@@ -135,9 +135,9 @@ __xtensa_copy_user:
add a7, a3, a4 # a7 = end address for source add a7, a3, a4 # a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte: .Lnextbyte:
EX(l8ui, a6, a3, 0, l_fixup) EX(l8ui, a6, a3, 0, fixup)
addi a3, a3, 1 addi a3, a3, 1
EX(s8i, a6, a5, 0, s_fixup) EX(s8i, a6, a5, 0, fixup)
addi a5, a5, 1 addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a7, .Lnextbyte blt a3, a7, .Lnextbyte
...@@ -161,15 +161,15 @@ __xtensa_copy_user: ...@@ -161,15 +161,15 @@ __xtensa_copy_user:
add a8, a8, a3 # a8 = end of last 16B source chunk add a8, a8, a3 # a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop1: .Loop1:
EX(l32i, a6, a3, 0, l_fixup) EX(l32i, a6, a3, 0, fixup)
EX(l32i, a7, a3, 4, l_fixup) EX(l32i, a7, a3, 4, fixup)
EX(s32i, a6, a5, 0, s_fixup) EX(s32i, a6, a5, 0, fixup)
EX(l32i, a6, a3, 8, l_fixup) EX(l32i, a6, a3, 8, fixup)
EX(s32i, a7, a5, 4, s_fixup) EX(s32i, a7, a5, 4, fixup)
EX(l32i, a7, a3, 12, l_fixup) EX(l32i, a7, a3, 12, fixup)
EX(s32i, a6, a5, 8, s_fixup) EX(s32i, a6, a5, 8, fixup)
addi a3, a3, 16 addi a3, a3, 16
EX(s32i, a7, a5, 12, s_fixup) EX(s32i, a7, a5, 12, fixup)
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a8, .Loop1 blt a3, a8, .Loop1
...@@ -177,31 +177,31 @@ __xtensa_copy_user: ...@@ -177,31 +177,31 @@ __xtensa_copy_user:
.Loop1done: .Loop1done:
bbci.l a4, 3, .L2 bbci.l a4, 3, .L2
# copy 8 bytes # copy 8 bytes
EX(l32i, a6, a3, 0, l_fixup) EX(l32i, a6, a3, 0, fixup)
EX(l32i, a7, a3, 4, l_fixup) EX(l32i, a7, a3, 4, fixup)
addi a3, a3, 8 addi a3, a3, 8
EX(s32i, a6, a5, 0, s_fixup) EX(s32i, a6, a5, 0, fixup)
EX(s32i, a7, a5, 4, s_fixup) EX(s32i, a7, a5, 4, fixup)
addi a5, a5, 8 addi a5, a5, 8
.L2: .L2:
bbci.l a4, 2, .L3 bbci.l a4, 2, .L3
# copy 4 bytes # copy 4 bytes
EX(l32i, a6, a3, 0, l_fixup) EX(l32i, a6, a3, 0, fixup)
addi a3, a3, 4 addi a3, a3, 4
EX(s32i, a6, a5, 0, s_fixup) EX(s32i, a6, a5, 0, fixup)
addi a5, a5, 4 addi a5, a5, 4
.L3: .L3:
bbci.l a4, 1, .L4 bbci.l a4, 1, .L4
# copy 2 bytes # copy 2 bytes
EX(l16ui, a6, a3, 0, l_fixup) EX(l16ui, a6, a3, 0, fixup)
addi a3, a3, 2 addi a3, a3, 2
EX(s16i, a6, a5, 0, s_fixup) EX(s16i, a6, a5, 0, fixup)
addi a5, a5, 2 addi a5, a5, 2
.L4: .L4:
bbci.l a4, 0, .L5 bbci.l a4, 0, .L5
# copy 1 byte # copy 1 byte
EX(l8ui, a6, a3, 0, l_fixup) EX(l8ui, a6, a3, 0, fixup)
EX(s8i, a6, a5, 0, s_fixup) EX(s8i, a6, a5, 0, fixup)
.L5: .L5:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw retw
...@@ -217,7 +217,7 @@ __xtensa_copy_user: ...@@ -217,7 +217,7 @@ __xtensa_copy_user:
# copy 16 bytes per iteration for word-aligned dst and unaligned src # copy 16 bytes per iteration for word-aligned dst and unaligned src
and a10, a3, a8 # save unalignment offset for below and a10, a3, a8 # save unalignment offset for below
sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
EX(l32i, a6, a3, 0, l_fixup) # load first word EX(l32i, a6, a3, 0, fixup) # load first word
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
loopnez a7, .Loop2done loopnez a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */ #else /* !XCHAL_HAVE_LOOPS */
...@@ -226,19 +226,19 @@ __xtensa_copy_user: ...@@ -226,19 +226,19 @@ __xtensa_copy_user:
add a12, a12, a3 # a12 = end of last 16B source chunk add a12, a12, a3 # a12 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop2: .Loop2:
EX(l32i, a7, a3, 4, l_fixup) EX(l32i, a7, a3, 4, fixup)
EX(l32i, a8, a3, 8, l_fixup) EX(l32i, a8, a3, 8, fixup)
ALIGN( a6, a6, a7) ALIGN( a6, a6, a7)
EX(s32i, a6, a5, 0, s_fixup) EX(s32i, a6, a5, 0, fixup)
EX(l32i, a9, a3, 12, l_fixup) EX(l32i, a9, a3, 12, fixup)
ALIGN( a7, a7, a8) ALIGN( a7, a7, a8)
EX(s32i, a7, a5, 4, s_fixup) EX(s32i, a7, a5, 4, fixup)
EX(l32i, a6, a3, 16, l_fixup) EX(l32i, a6, a3, 16, fixup)
ALIGN( a8, a8, a9) ALIGN( a8, a8, a9)
EX(s32i, a8, a5, 8, s_fixup) EX(s32i, a8, a5, 8, fixup)
addi a3, a3, 16 addi a3, a3, 16
ALIGN( a9, a9, a6) ALIGN( a9, a9, a6)
EX(s32i, a9, a5, 12, s_fixup) EX(s32i, a9, a5, 12, fixup)
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a12, .Loop2 blt a3, a12, .Loop2
...@@ -246,39 +246,39 @@ __xtensa_copy_user: ...@@ -246,39 +246,39 @@ __xtensa_copy_user:
.Loop2done: .Loop2done:
bbci.l a4, 3, .L12 bbci.l a4, 3, .L12
# copy 8 bytes # copy 8 bytes
EX(l32i, a7, a3, 4, l_fixup) EX(l32i, a7, a3, 4, fixup)
EX(l32i, a8, a3, 8, l_fixup) EX(l32i, a8, a3, 8, fixup)
ALIGN( a6, a6, a7) ALIGN( a6, a6, a7)
EX(s32i, a6, a5, 0, s_fixup) EX(s32i, a6, a5, 0, fixup)
addi a3, a3, 8 addi a3, a3, 8
ALIGN( a7, a7, a8) ALIGN( a7, a7, a8)
EX(s32i, a7, a5, 4, s_fixup) EX(s32i, a7, a5, 4, fixup)
addi a5, a5, 8 addi a5, a5, 8
mov a6, a8 mov a6, a8
.L12: .L12:
bbci.l a4, 2, .L13 bbci.l a4, 2, .L13
# copy 4 bytes # copy 4 bytes
EX(l32i, a7, a3, 4, l_fixup) EX(l32i, a7, a3, 4, fixup)
addi a3, a3, 4 addi a3, a3, 4
ALIGN( a6, a6, a7) ALIGN( a6, a6, a7)
EX(s32i, a6, a5, 0, s_fixup) EX(s32i, a6, a5, 0, fixup)
addi a5, a5, 4 addi a5, a5, 4
mov a6, a7 mov a6, a7
.L13: .L13:
add a3, a3, a10 # readjust a3 with correct misalignment add a3, a3, a10 # readjust a3 with correct misalignment
bbci.l a4, 1, .L14 bbci.l a4, 1, .L14
# copy 2 bytes # copy 2 bytes
EX(l8ui, a6, a3, 0, l_fixup) EX(l8ui, a6, a3, 0, fixup)
EX(l8ui, a7, a3, 1, l_fixup) EX(l8ui, a7, a3, 1, fixup)
addi a3, a3, 2 addi a3, a3, 2
EX(s8i, a6, a5, 0, s_fixup) EX(s8i, a6, a5, 0, fixup)
EX(s8i, a7, a5, 1, s_fixup) EX(s8i, a7, a5, 1, fixup)
addi a5, a5, 2 addi a5, a5, 2
.L14: .L14:
bbci.l a4, 0, .L15 bbci.l a4, 0, .L15
# copy 1 byte # copy 1 byte
EX(l8ui, a6, a3, 0, l_fixup) EX(l8ui, a6, a3, 0, fixup)
EX(s8i, a6, a5, 0, s_fixup) EX(s8i, a6, a5, 0, fixup)
.L15: .L15:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw retw
...@@ -291,30 +291,10 @@ __xtensa_copy_user: ...@@ -291,30 +291,10 @@ __xtensa_copy_user:
* bytes_copied = a5 - a2 * bytes_copied = a5 - a2
* retval = bytes_not_copied = original len - bytes_copied * retval = bytes_not_copied = original len - bytes_copied
* retval = a11 - (a5 - a2) * retval = a11 - (a5 - a2)
*
* Clearing the remaining pieces of kernel memory plugs security
* holes. This functionality is the equivalent of the *_zeroing
* functions that some architectures provide.
*/ */
.Lmemset:
.word memset
s_fixup: fixup:
sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */
retw retw
l_fixup:
sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied == return value */
/* void *memset(void *s, int c, size_t n); */
mov a6, a5 /* s */
movi a7, 0 /* c */
mov a8, a2 /* n */
l32r a4, .Lmemset
callx4 a4
/* Ignore memset return value in a6. */
/* a2 still contains bytes not copied. */
retw
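
The unified fixup above no longer calls memset(); it only reports how much was left uncopied, and the tail-zeroing now happens once in the generic copy_from_user(). As a plain-C illustration of the arithmetic described in the comment above (a2 = original destination, a5 = current destination, a11 = original length; this helper is editorial, not part of the diff):

/* Editorial sketch of the return value computed by the fixup above. */
static unsigned long fixup_bytes_not_copied(unsigned long dst_start,  /* a2  */
					    unsigned long dst_cur,    /* a5  */
					    unsigned long orig_len)   /* a11 */
{
	unsigned long copied = dst_cur - dst_start;	/* a5 - a2 */

	return orig_len - copied;	/* a11 - (a5 - a2): bytes not copied */
}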
...@@ -650,7 +650,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) ...@@ -650,7 +650,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: write %zd bytes\n", bd->name, count); dprintk("%s: write %zd bytes\n", bd->name, count);
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) if (unlikely(uaccess_kernel()))
return -EINVAL; return -EINVAL;
bsg_set_block(bd, file); bsg_set_block(bd, file);
......
...@@ -1289,32 +1289,13 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ...@@ -1289,32 +1289,13 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
|| (cmd > EXPRESS_IOCTL_MAX)) || (cmd > EXPRESS_IOCTL_MAX))
return -ENOTSUPP; return -ENOTSUPP;
if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) { ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
if (IS_ERR(ioctl)) {
esas2r_log(ESAS2R_LOG_WARN, esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler access_ok failed for cmd %d, " "ioctl_handler access_ok failed for cmd %d, "
"address %p", cmd, "address %p", cmd,
arg); arg);
return -EFAULT; return PTR_ERR(ioctl);
}
/* allocate a kernel memory buffer for the IOCTL data */
ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
if (ioctl == NULL) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler kzalloc failed for %zu bytes",
sizeof(struct atto_express_ioctl));
return -ENOMEM;
}
err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
if (err != 0) {
esas2r_log(ESAS2R_LOG_WARN,
"copy_from_user didn't copy everything (err %d, cmd %d)",
err,
cmd);
kfree(ioctl);
return -EFAULT;
} }
/* verify the signature */ /* verify the signature */
......
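
The esas2r conversion above replaces an open-coded access_ok() + kzalloc() + __copy_from_user() sequence with memdup_user(), which allocates a kernel buffer, copies the user data into it, and returns either the buffer or an ERR_PTR(). A minimal usage sketch of the same pattern (the struct and function names here are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_args { int op; int arg; };		/* hypothetical layout */

static long demo_ioctl(void __user *uarg)
{
	struct demo_args *a;

	a = memdup_user(uarg, sizeof(*a));
	if (IS_ERR(a))
		return PTR_ERR(a);	/* -EFAULT or -ENOMEM */

	/* ... operate on the kernel copy ... */

	kfree(a);
	return 0;
}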
...@@ -581,7 +581,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) ...@@ -581,7 +581,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
sg_io_hdr_t *hp; sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE]; unsigned char cmnd[SG_MAX_CDB_SIZE];
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) if (unlikely(uaccess_kernel()))
return -EINVAL; return -EINVAL;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
......
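
uaccess_kernel() replaces the open-coded segment_eq(get_fs(), KERNEL_DS) test in the bsg and sg hunks above: both write paths parse a header out of the user buffer that embeds further pointers, so they refuse to run while the address limit covers kernel memory. A hedged sketch of that guard in a hypothetical write handler:

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	/*
	 * If the caller runs under set_fs(KERNEL_DS), pointers pulled out
	 * of 'buf' would be interpreted as kernel addresses - refuse.
	 */
	if (uaccess_kernel())
		return -EINVAL;

	/* ... normal request parsing ... */
	return count;
}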
...@@ -1460,27 +1460,10 @@ static void o2net_rx_until_empty(struct work_struct *work) ...@@ -1460,27 +1460,10 @@ static void o2net_rx_until_empty(struct work_struct *work)
static int o2net_set_nodelay(struct socket *sock) static int o2net_set_nodelay(struct socket *sock)
{ {
int ret, val = 1; int val = 1;
mm_segment_t oldfs;
oldfs = get_fs(); return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
set_fs(KERNEL_DS); (void *)&val, sizeof(val));
/*
* Dear unsuspecting programmer,
*
* Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
* argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
* silently turn into SO_DEBUG.
*
* Yours,
* Keeper of hilariously fragile interfaces.
*/
ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
(char __user *)&val, sizeof(val));
set_fs(oldfs);
return ret;
} }
static int o2net_set_usertimeout(struct socket *sock) static int o2net_set_usertimeout(struct socket *sock)
...@@ -1488,7 +1471,7 @@ static int o2net_set_usertimeout(struct socket *sock) ...@@ -1488,7 +1471,7 @@ static int o2net_set_usertimeout(struct socket *sock)
int user_timeout = O2NET_TCP_USER_TIMEOUT; int user_timeout = O2NET_TCP_USER_TIMEOUT;
return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
(char *)&user_timeout, sizeof(user_timeout)); (void *)&user_timeout, sizeof(user_timeout));
} }
static void o2net_initialize_handshake(void) static void o2net_initialize_handshake(void)
......
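
kernel_setsockopt() takes a kernel pointer for the option value and handles the address-limit switch internally, so the get_fs()/set_fs(KERNEL_DS) bracketing around a direct sock->ops->setsockopt() call can go away, as in the two o2net hunks above. A usage sketch with a hypothetical helper name:

#include <linux/net.h>
#include <net/sock.h>
#include <net/tcp.h>

static int demo_set_nodelay(struct socket *sock)
{
	int val = 1;

	/* optval is a plain kernel pointer; no set_fs() juggling needed */
	return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
				 (char *)&val, sizeof(val));
}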
#ifndef __ASM_GENERIC_EXTABLE_H
#define __ASM_GENERIC_EXTABLE_H
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);
#endif
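
With the generic table above, each entry records the absolute address of an instruction allowed to fault and the absolute address to continue at; the architecture's fault handler looks the entry up by the faulting program counter and redirects execution. A hedged sketch of such a handler (search_exception_tables() is the common lookup helper; the ->pc field is a placeholder for whatever the architecture's pt_regs actually calls its program counter):

#include <linux/extable.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->pc);	/* ->pc: arch-specific */
	if (!e)
		return 0;		/* no fixup - the fault is fatal */

	regs->pc = e->fixup;		/* resume at the out-of-line stub */
	return 1;
}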
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* on any machine that has kernel and user data in the same * on any machine that has kernel and user data in the same
* address space, e.g. all NOMMU machines. * address space, e.g. all NOMMU machines.
*/ */
#include <linux/sched.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/segment.h> #include <asm/segment.h>
...@@ -35,9 +34,6 @@ static inline void set_fs(mm_segment_t fs) ...@@ -35,9 +34,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a).seg == (b).seg) #define segment_eq(a, b) ((a).seg == (b).seg)
#endif #endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
/* /*
...@@ -51,87 +47,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size) ...@@ -51,87 +47,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
} }
#endif #endif
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry
{
unsigned long insn, fixup;
};
/*
* architectures with an MMU should override these two
*/
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
const void __user * from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 *)to = *(u8 __force *)from;
return 0;
case 2:
*(u16 *)to = *(u16 __force *)from;
return 0;
case 4:
*(u32 *)to = *(u32 __force *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 *)to = *(u64 __force *)from;
return 0;
#endif
default:
break;
}
}
memcpy(to, (const void __force *)from, n);
return 0;
}
#endif
#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 __force *)to = *(u64 *)from;
return 0;
#endif
default:
break;
}
}
memcpy((void __force *)to, from, n);
return 0;
}
#endif
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type. * use the right size if we just have the right pointer type.
...@@ -171,8 +86,7 @@ static inline __must_check long __copy_to_user(void __user *to, ...@@ -171,8 +86,7 @@ static inline __must_check long __copy_to_user(void __user *to,
static inline int __put_user_fn(size_t size, void __user *ptr, void *x) static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{ {
size = __copy_to_user(ptr, x, size); return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
return size ? -EFAULT : size;
} }
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) #define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
...@@ -187,28 +101,28 @@ extern int __put_user_bad(void) __attribute__((noreturn)); ...@@ -187,28 +101,28 @@ extern int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: { \ case 1: { \
unsigned char __x; \ unsigned char __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \ __gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \ ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \ break; \
}; \ }; \
case 2: { \ case 2: { \
unsigned short __x; \ unsigned short __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \ __gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \ ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \ break; \
}; \ }; \
case 4: { \ case 4: { \
unsigned int __x; \ unsigned int __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \ __gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \ ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \ break; \
}; \ }; \
case 8: { \ case 8: { \
unsigned long long __x; \ unsigned long long __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \ __gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \ ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
...@@ -233,12 +147,7 @@ extern int __put_user_bad(void) __attribute__((noreturn)); ...@@ -233,12 +147,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#ifndef __get_user_fn #ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{ {
size_t n = __copy_from_user(x, ptr, size); return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
if (unlikely(n)) {
memset(x + (size - n), 0, n);
return -EFAULT;
}
return 0;
} }
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
...@@ -247,36 +156,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) ...@@ -247,36 +156,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn)); extern int __get_user_bad(void) __attribute__((noreturn));
#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif
#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
unsigned long res = n;
might_fault();
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
might_fault();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
else
return n;
}
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
...@@ -348,4 +227,6 @@ clear_user(void __user *to, unsigned long n) ...@@ -348,4 +227,6 @@ clear_user(void __user *to, unsigned long n)
return __clear_user(to, n); return __clear_user(to, n);
} }
#include <asm/extable.h>
#endif /* __ASM_GENERIC_UACCESS_H */ #endif /* __ASM_GENERIC_UACCESS_H */
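
After this change __get_user_fn() maps any short raw_copy_from_user() directly to -EFAULT, and the temporaries in the get_user() switch are zero-initialized, so a faulting get_user() hands back 0 rather than stack garbage. Caller-side nothing changes: the error code is all that needs checking. An illustrative (editorial) usage:

/* Sketch: reading a single value with get_user(); on a fault the
 * destination is left zeroed and -EFAULT is returned.
 */
static int demo_read_flag(const int __user *uptr, int *flag)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;

	*flag = val;
	return 0;
}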
...@@ -2,8 +2,199 @@ ...@@ -2,8 +2,199 @@
#define __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
#include <asm/uaccess.h> #include <asm/uaccess.h>
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
* __copy_{to,from}_user{,_inatomic}().
*
* raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
* return the amount left to copy. They should assume that access_ok() has
* already been checked (and succeeded); they should *not* zero-pad anything.
* No KASAN or object size checks either - those belong here.
*
* Both of these functions should attempt to copy size bytes starting at from
* into the area starting at to. They must not fetch or store anything
* outside of those areas. Return value must be between 0 (everything
* copied successfully) and size (nothing copied).
*
* If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
* at to must become equal to the bytes fetched from the corresponding area
* starting at from. All data past to + size - N must be left unmodified.
*
* If copying succeeds, the return value must be 0. If some data cannot be
* fetched, it is permitted to copy less than had been fetched; the only
* hard requirement is that not storing anything at all (i.e. returning size)
* should happen only when nothing could be copied. In other words, you don't
* have to squeeze as much as possible - it is allowed, but not necessary.
*
* For raw_copy_from_user(), to always points to kernel memory, so no faults
* on store should happen. Interpretation of from is affected by set_fs().
* For raw_copy_to_user() it's the other way round.
*
* Both can be inlined - it's up to architectures whether they want to bother
* with that. They should not be used directly; they are used to implement
* the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user(),
* __copy_{to,from}_user_inatomic())
* that are used instead. Out of those, __... ones are inlined. Plain
* copy_{to,from}_user() might or might not be inlined. If you want them
* inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
*
* NOTE: only copy_from_user() zero-pads the destination in case of short copy.
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
* at all; their callers absolutely must check the return value.
*
* Biarch ones should also provide raw_copy_in_user() - similar to the above,
* but both source and destination are __user pointers (affected by set_fs()
* as usual) and both source and destination can trigger faults.
*/
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
kasan_check_write(to, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
/**
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
* The caller should also make sure the user space address is pinned
* so that we don't end up taking a page fault and sleeping.
*/
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
kasan_check_read(from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = raw_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = raw_copy_to_user(to, from, n);
return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);
static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
might_fault();
kasan_check_write(to, n);
if (likely(sz < 0 || sz >= n)) {
check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
kasan_check_read(from, n);
might_fault();
if (likely(sz < 0 || sz >= n)) {
check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
} else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();
return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
__copy_in_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return raw_copy_in_user(to, from, n);
}
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
n = raw_copy_in_user(to, from, n);
return n;
}
#endif
static __always_inline void pagefault_disabled_inc(void) static __always_inline void pagefault_disabled_inc(void)
{ {
current->pagefault_disabled++; current->pagefault_disabled++;
...@@ -67,12 +258,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to, ...@@ -67,12 +258,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
return __copy_from_user_inatomic(to, from, n); return __copy_from_user_inatomic(to, from, n);
} }
static inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
#endif /* ARCH_HAS_NOCACHE_UACCESS */ #endif /* ARCH_HAS_NOCACHE_UACCESS */
/* /*
......
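
Under the contract spelled out in the comment above, an architecture now only has to supply raw_copy_{to,from}_user() (and may opt into inlining with INLINE_COPY_{TO,FROM}_USER); access_ok() checks, tail zeroing, KASAN and object-size checks all live in the generic wrappers. A hypothetical minimal port - roughly what a NOMMU-style architecture could get away with - might look like this (sketch only, not taken from the diff):

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;			/* everything copied */
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER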
...@@ -100,7 +100,7 @@ struct sockaddr_ib { ...@@ -100,7 +100,7 @@ struct sockaddr_ib {
*/ */
static inline bool ib_safe_file_access(struct file *filp) static inline bool ib_safe_file_access(struct file *filp)
{ {
return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); return filp->f_cred == current_cred() && !uaccess_kernel();
} }
#endif /* _RDMA_IB_H */ #endif /* _RDMA_IB_H */
...@@ -96,7 +96,7 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, ...@@ -96,7 +96,7 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
if (unlikely(in_interrupt() || if (unlikely(in_interrupt() ||
current->flags & (PF_KTHREAD | PF_EXITING))) current->flags & (PF_KTHREAD | PF_EXITING)))
return -EPERM; return -EPERM;
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) if (unlikely(uaccess_kernel()))
return -EPERM; return -EPERM;
if (!access_ok(VERIFY_WRITE, unsafe_ptr, size)) if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
return -EPERM; return -EPERM;
......
...@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ ...@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
once.o refcount.o once.o refcount.o usercopy.o
obj-y += string_helpers.o obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o obj-y += hexdump.o
......
...@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction, ...@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
size_t count) size_t count)
{ {
/* It will get better. Eventually... */ /* It will get better. Eventually... */
if (segment_eq(get_fs(), KERNEL_DS)) { if (uaccess_kernel()) {
direction |= ITER_KVEC; direction |= ITER_KVEC;
i->type = direction; i->type = direction;
i->kvec = (struct kvec *)iov; i->kvec = (struct kvec *)iov;
...@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) ...@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return 0; return 0;
} }
iterate_and_advance(i, bytes, v, iterate_and_advance(i, bytes, v,
__copy_from_user_nocache((to += v.iov_len) - v.iov_len, __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len), v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len), v.bv_offset, v.bv_len),
...@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) ...@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes)) if (unlikely(i->count < bytes))
return false; return false;
iterate_all_kinds(i, bytes, v, ({ iterate_all_kinds(i, bytes, v, ({
if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len, if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len)) v.iov_base, v.iov_len))
return false; return false;
0;}), 0;}),
......
#include <linux/uaccess.h>
/* out-of-line parts */
#ifndef INLINE_COPY_FROM_USER
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = raw_copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
EXPORT_SYMBOL(_copy_from_user);
#endif
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (likely(access_ok(VERIFY_WRITE, to, n)))
n = raw_copy_to_user(to, from, n);
return n;
}
EXPORT_SYMBOL(_copy_to_user);
#endif
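
Whether _copy_from_user()/_copy_to_user() end up inline or in lib/usercopy.o, the calling convention is the same: the return value is the number of bytes that could not be copied, and copy_from_user() guarantees the uncopied tail of the destination is zeroed. The canonical caller pattern therefore treats any non-zero return as -EFAULT (illustrative snippet with hypothetical names):

struct demo_cfg { unsigned int flags; unsigned long addr; };	/* hypothetical */

static long demo_get_config(struct demo_cfg __user *ubuf,
			    struct demo_cfg *kcfg)
{
	if (copy_from_user(kcfg, ubuf, sizeof(*kcfg)))
		return -EFAULT;		/* short copy: bail out */
	return 0;
}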
...@@ -4298,7 +4298,7 @@ void __might_fault(const char *file, int line) ...@@ -4298,7 +4298,7 @@ void __might_fault(const char *file, int line)
* get paged out, therefore we'll never actually fault, and the * get paged out, therefore we'll never actually fault, and the
* below annotations will generate false positives. * below annotations will generate false positives.
*/ */
if (segment_eq(get_fs(), KERNEL_DS)) if (uaccess_kernel())
return; return;
if (pagefault_disabled()) if (pagefault_disabled())
return; return;
......
...@@ -84,13 +84,10 @@ static struct ctl_table rds_tcp_sysctl_table[] = { ...@@ -84,13 +84,10 @@ static struct ctl_table rds_tcp_sysctl_table[] = {
/* doing it this way avoids calling tcp_sk() */ /* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock) void rds_tcp_nonagle(struct socket *sock)
{ {
mm_segment_t oldfs = get_fs();
int val = 1; int val = 1;
set_fs(KERNEL_DS); kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val,
sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
sizeof(val)); sizeof(val));
set_fs(oldfs);
} }
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc) u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
......
...@@ -40,13 +40,7 @@ ...@@ -40,13 +40,7 @@
static void rds_tcp_cork(struct socket *sock, int val) static void rds_tcp_cork(struct socket *sock, int val)
{ {
mm_segment_t oldfs; kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (void *)&val, sizeof(val));
oldfs = get_fs();
set_fs(KERNEL_DS);
sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
sizeof(val));
set_fs(oldfs);
} }
void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp) void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp)
......
...@@ -125,17 +125,8 @@ config HAVE_HARDENED_USERCOPY_ALLOCATOR ...@@ -125,17 +125,8 @@ config HAVE_HARDENED_USERCOPY_ALLOCATOR
validating memory ranges against heap object sizes in validating memory ranges against heap object sizes in
support of CONFIG_HARDENED_USERCOPY. support of CONFIG_HARDENED_USERCOPY.
config HAVE_ARCH_HARDENED_USERCOPY
bool
help
The architecture supports CONFIG_HARDENED_USERCOPY by
calling check_object_size() just before performing the
userspace copies in the low level implementation of
copy_to_user() and copy_from_user().
config HARDENED_USERCOPY config HARDENED_USERCOPY
bool "Harden memory copies between kernel and userspace" bool "Harden memory copies between kernel and userspace"
depends on HAVE_ARCH_HARDENED_USERCOPY
depends on HAVE_HARDENED_USERCOPY_ALLOCATOR depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
select BUG select BUG
help help
......
...@@ -608,7 +608,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr, ...@@ -608,7 +608,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
static bool tomoyo_kernel_service(void) static bool tomoyo_kernel_service(void)
{ {
/* Nothing to do if I am a kernel service. */ /* Nothing to do if I am a kernel service. */
return segment_eq(get_fs(), KERNEL_DS); return uaccess_kernel();
} }
/** /**
......