Commit dc502142 authored by Linus Torvalds

Merge branch 'uaccess.strlen' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull user access str* updates from Al Viro:
 "uaccess str...() dead code removal"

* 'uaccess.strlen' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  s390 keyboard.c: don't open-code strndup_user()
  mips: get rid of unused __strnlen_user()
  get rid of unused __strncpy_from_user() instances
  kill strlen_user()
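
The caller-visible effect of the whole branch is simple: the unbounded strlen_user() is gone, and the bounded strnlen_user() is what remains for measuring user strings. A minimal caller sketch (illustration only, not code from this series; the helper name and the PATH_MAX limit are invented for the example) of the convention strnlen_user() keeps -- it returns the size including the terminating NUL, 0 on a faulting access, and a value above the limit when no NUL was found within it:

    #include <linux/uaccess.h>
    #include <linux/limits.h>
    #include <linux/errno.h>

    /* Hypothetical caller: measure a user-supplied path before copying it. */
    static int check_user_path(const char __user *upath)
    {
        long len = strnlen_user(upath, PATH_MAX);   /* bounded scan */

        if (!len)
            return -EFAULT;         /* faulted before reaching a NUL */
        if (len > PATH_MAX)
            return -ENAMETOOLONG;   /* no NUL within the limit */
        return 0;                   /* len counts the terminating NUL */
    }
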
@@ -326,7 +326,6 @@ clear_user(void __user *to, long len)
(uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#include <asm/extable.h>
......
@@ -526,7 +526,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASMARM_UACCESS_H */
@@ -349,7 +349,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
@@ -194,13 +194,6 @@ static inline long __must_check strnlen_user(const char __user *src, long n)
return strnlen((const char __force *)src, n) + 1;
}
static inline long __must_check strlen_user(const char __user *src)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
return strlen((const char __force *)src) + 1;
}
/*
* Zero Userspace
*/
......
@@ -172,12 +172,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
return __do_strncpy_from_user(dst, src, count);
}
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
@@ -363,6 +357,4 @@ __clear_user(void __user *to, unsigned long n)
return __do_clear_user(to, n);
}
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
#endif /* _CRIS_UACCESS_H */
@@ -282,6 +282,4 @@ clear_user(void __user *to, unsigned long n)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
#endif /* _ASM_UACCESS_H */
@@ -277,18 +277,6 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
__sfu_ret; \
})
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char __user *);
#define strlen_user(str) \
({ \
const char __user *__su_str = (str); \
unsigned long __su_ret = 0; \
if (__access_ok(__su_str, 0)) \
__su_ret = __strlen_user(__su_str); \
__su_ret; \
})
/*
* Returns: 0 if exception before NUL or reaching the supplied limit
* (N), a value greater than N if the limit would be exceeded, else
......
@@ -5,7 +5,7 @@
lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
clear_user.o strncpy_from_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o xor.o
......
/*
* Optimized version of the strlen_user() function
*
* Inputs:
* in0 address of buffer
*
* Outputs:
* ret0 0 in case of fault, strlen(buffer)+1 otherwise
*
* Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* 01/19/99 S.Eranian heavily enhanced version (see details below)
* 09/24/99 S.Eranian added speculation recovery code
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
//
// int strlen_user(char *)
// ------------------------
// Returns:
// - length of string + 1
// - 0 in case an exception is raised
//
// This is an enhanced version of the basic strlen_user. it includes a
// combination of compute zero index (czx), parallel comparisons, speculative
// loads and loop unroll using rotating registers.
//
// General Ideas about the algorithm:
// The goal is to look at the string in chunks of 8 bytes.
// so we need to do a few extra checks at the beginning because the
// string may not be 8-byte aligned. In this case we load the 8byte
// quantity which includes the start of the string and mask the unused
// bytes with 0xff to avoid confusing czx.
// We use speculative loads and software pipelining to hide memory
// latency and do read ahead safely. This way we defer any exception.
//
// Because we don't want the kernel to be relying on particular
// settings of the DCR register, we provide recovery code in case
// speculation fails. The recovery code is going to "redo" the work using
// only normal loads. If we still get a fault then we return an
// error (ret0=0). Otherwise we return the strlen+1 as usual.
// The fact that speculation may fail can be caused, for instance, by
// the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
// a NaT bit will be set if the translation is not present. The normal
// load, on the other hand, will cause the translation to be inserted
// if the mapping exists.
//
// It should be noted that we execute recovery code only when we need
// to use the data that has been speculatively loaded: we don't execute
// recovery code on pure read ahead data.
//
// Remarks:
// - the cmp r0,r0 is used as a fast way to initialize a predicate
// register to 1. This is required to make sure that we get the parallel
// compare correct.
//
// - we don't use the epilogue counter to exit the loop but we need to set
// it to zero beforehand.
//
// - after the loop we must test for Nat values because neither the
// czx nor cmp instruction raise a NaT consumption fault. We must be
// careful not to look too far for a Nat for which we don't care.
// For instance we don't need to look at a NaT in val2 if the zero byte
// was in val1.
//
// - Clearly performance tuning is required.
//
#define saved_pfs r11
#define tmp r10
#define base r16
#define orig r17
#define saved_pr r18
#define src r19
#define mask r20
#define val r21
#define val1 r22
#define val2 r23
GLOBAL_ENTRY(__strlen_user)
.prologue
.save ar.pfs, saved_pfs
alloc saved_pfs=ar.pfs,11,0,0,8
.rotr v[2], w[2] // declares our 4 aliases
extr.u tmp=in0,0,3 // tmp=least significant 3 bits
mov orig=in0 // keep trackof initial byte address
dep src=0,in0,0,3 // src=8byte-aligned in0 address
.save pr, saved_pr
mov saved_pr=pr // preserve predicates (rotation)
;;
.body
ld8.s v[1]=[src],8 // load the initial 8bytes (must speculate)
shl tmp=tmp,3 // multiply by 8bits/byte
mov mask=-1 // our mask
;;
ld8.s w[1]=[src],8 // load next 8 bytes in 2nd pipeline
cmp.eq p6,p0=r0,r0 // sets p6 (required because of // cmp.and)
sub tmp=64,tmp // how many bits to shift our mask on the right
;;
shr.u mask=mask,tmp // zero enough bits to hold v[1] valuable part
mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs)
;;
add base=-16,src // keep track of aligned base
chk.s v[1], .recover // if already NaT, then directly skip to recover
or v[1]=v[1],mask // now we have a safe initial byte pattern
;;
1:
ld8.s v[0]=[src],8 // speculatively load next
czx1.r val1=v[1] // search 0 byte from right
czx1.r val2=w[1] // search 0 byte from right following 8bytes
;;
ld8.s w[0]=[src],8 // speculatively load next to next
cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
(p6) br.wtop.dptk.few 1b // loop until p6 == 0
;;
//
// We must return try the recovery code iff
// val1_is_nat || (val1==8 && val2_is_nat)
//
// XXX Fixme
// - there must be a better way of doing the test
//
cmp.eq p8,p9=8,val1 // p6 = val1 had zero (disambiguate)
tnat.nz p6,p7=val1 // test NaT on val1
(p6) br.cond.spnt .recover // jump to recovery if val1 is NaT
;;
//
// if we come here p7 is true, i.e., initialized for // cmp
//
cmp.eq.and p7,p0=8,val1// val1==8?
tnat.nz.and p7,p0=val2 // test NaT if val2
(p7) br.cond.spnt .recover // jump to recovery if val2 is NaT
;;
(p8) mov val1=val2 // val2 contains the value
(p8) adds src=-16,src // correct position when 3 ahead
(p9) adds src=-24,src // correct position when 4 ahead
;;
sub ret0=src,orig // distance from origin
sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1
mov pr=saved_pr,0xffffffffffff0000
;;
sub ret0=ret0,tmp // length=now - back -1
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp // end of normal execution
//
// Outlined recovery code when speculation failed
//
// This time we don't use speculation and rely on the normal exception
// mechanism. that's why the loop is not as good as the previous one
// because read ahead is not possible
//
// XXX Fixme
// - today we restart from the beginning of the string instead
// of trying to continue where we left off.
//
.recover:
EX(.Lexit1, ld8 val=[base],8) // load the initial bytes
;;
or val=val,mask // remask first bytes
cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
;;
//
// ar.ec is still zero here
//
2:
EX(.Lexit1, (p6) ld8 val=[base],8)
;;
czx1.r val1=val // search 0 byte from right
;;
cmp.eq p6,p0=8,val1 // val1==8 ?
(p6) br.wtop.dptk.few 2b // loop until p6 == 0
;;
sub ret0=base,orig // distance from base
sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1
mov pr=saved_pr,0xffffffffffff0000
;;
sub ret0=ret0,tmp // length=now - back -1
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp // end of successful recovery code
//
// We failed even on the normal load (called from exception handler)
//
.Lexit1:
mov ret0=0
mov pr=saved_pr,0xffffffffffff0000
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp
END(__strlen_user)
EXPORT_SYMBOL(__strlen_user)
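
The comment block above describes the heart of the routine: walk the string in 8-byte chunks, let czx1.r locate a zero byte inside each chunk, and use speculative loads plus the outlined .recover path to keep faulting reads safe. The zero-byte probe has a well-known portable counterpart; a rough C sketch of the chunked scan (illustration only -- no alignment fixup, no fault handling, nothing ia64-specific):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Sets the top bit of every byte of x that was zero; a nonzero result
     * means the 8-byte chunk contains a NUL.  Portable stand-in for czx1.r. */
    static int has_zero_byte(uint64_t x)
    {
        return ((x - 0x0101010101010101ULL) & ~x & 0x8080808080808080ULL) != 0;
    }

    /* Toy strlen walking 8 bytes at a time; assumes 8 readable bytes at every
     * step (the real routine relies on speculative loads to make over-reads
     * safe, and masks the unaligned head with 0xff bytes). */
    static size_t chunked_strlen(const char *s)
    {
        const char *p = s;
        uint64_t v;

        for (;;) {
            memcpy(&v, p, sizeof(v));   /* one 8-byte load per iteration */
            if (has_zero_byte(v))
                break;                  /* NUL is somewhere in this chunk */
            p += 8;
        }
        while (*p)                      /* pin down the exact NUL position */
            p++;
        return (size_t)(p - s);
    }
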
@@ -482,8 +482,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
long __must_check strncpy_from_user(char *dst, const char __user *src,
long count);
long __must_check __strncpy_from_user(char *dst,
const char __user *src, long count);
/**
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -511,22 +509,6 @@ unsigned long __clear_user(void __user *mem, unsigned long len);
*/
unsigned long clear_user(void __user *mem, unsigned long len);
/**
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
*
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char __user *str, long n);
#endif /* _ASM_M32R_UACCESS_H */
@@ -23,7 +23,6 @@ EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(strnlen_user);
......
@@ -88,14 +88,6 @@ do { \
#endif /* CONFIG_ISA_DUAL_ISSUE */
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
}
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
......
@@ -378,7 +378,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
(uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __clear_user(void __user *to, unsigned long n);
......
@@ -141,8 +141,6 @@ static inline long strnlen_user(const char *src, long n)
return(strlen(src) + 1); /* DAVIDM make safer */
}
#define strlen_user(str) strnlen_user(str, 32767)
/*
* Zero Userspace
*/
......
@@ -188,8 +188,6 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*/
extern long __must_check strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n);
extern unsigned long raw_copy_to_user(void __user *to, const void *from,
......
@@ -355,14 +355,12 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
extern int __strncpy_user(char *to, const char __user *from, int len);
#define __strncpy_from_user __strncpy_user
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
return __strncpy_user(dst, src, count);
}
/*
......
@@ -967,60 +967,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
__cl_size; \
})
extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_nocheck_asm(char *__to, const char __user *__from, long __len);
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
* Caller must check the specified block with access_ok() before calling
* this function.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
} else {
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
__MODULE_JAL(__strncpy_from_user_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
}
return res;
}
extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
@@ -1073,82 +1019,6 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
return res;
}
extern long __strlen_kernel_asm(const char __user *s);
extern long __strlen_user_asm(const char __user *s);
/*
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
*
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
static inline long strlen_user(const char __user *s)
{
long res;
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_kernel_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s)
: "$2", "$4", __UA_t0, "$31");
} else {
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s)
: "$2", "$4", __UA_t0, "$31");
}
return res;
}
extern long __strnlen_kernel_nocheck_asm(const char __user *s, long n);
extern long __strnlen_user_nocheck_asm(const char __user *s, long n);
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
long res;
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
__MODULE_JAL(__strnlen_kernel_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", __UA_t0, "$31");
} else {
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
__MODULE_JAL(__strnlen_user_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", __UA_t0, "$31");
}
return res;
}
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);
......
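
Only the strlen_user() and __strncpy_from_user() variants disappear from this header; the contract spelled out in the removed __strncpy_from_user() comment carries over to the checking strncpy_from_user() that stays -- length excluding the NUL on success, -EFAULT on a faulting access, @count when the copy had to stop before finding a NUL. A caller-side sketch of that contract (illustrative; the helper and its arguments are invented):

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Hypothetical helper: copy a user-supplied tag into a fixed kernel buffer. */
    static int fetch_tag(char *buf, size_t buflen, const char __user *utag)
    {
        long n = strncpy_from_user(buf, utag, buflen);

        if (n < 0)
            return n;               /* -EFAULT: bad user pointer */
        if (n == (long)buflen)
            return -EINVAL;         /* truncated: no NUL within buflen bytes */
        return 0;                   /* buf now holds a NUL-terminated copy */
    }
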
@@ -3,7 +3,7 @@
#
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strlen_user.o strncpy_user.o \
mips-atomic.o strncpy_user.o \
strnlen_user.o uncached.o
obj-y += iomap.o
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2011 MIPS Technologies, Inc.
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
PTR 9b, handler; \
.previous
/*
* Return the size of a string (including the ending 0)
*
* Return 0 for error
*/
.macro __BUILD_STRLEN_ASM func
LEAF(__strlen_\func\()_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a0
bnez v0, .Lfault\@
move v0, a0
.ifeqs "\func", "kernel"
1: EX(lbu, v1, (v0), .Lfault\@)
.else
1: EX(lbue, v1, (v0), .Lfault\@)
.endif
PTR_ADDIU v0, 1
bnez v1, 1b
PTR_SUBU v0, a0
jr ra
END(__strlen_\func\()_asm)
.Lfault\@: move v0, zero
jr ra
.endm
#ifndef CONFIG_EVA
/* Set aliases */
.global __strlen_user_asm
.set __strlen_user_asm, __strlen_kernel_asm
EXPORT_SYMBOL(__strlen_user_asm)
#endif
__BUILD_STRLEN_ASM kernel
EXPORT_SYMBOL(__strlen_kernel_asm)
#ifdef CONFIG_EVA
.set push
.set eva
__BUILD_STRLEN_ASM user
.set pop
EXPORT_SYMBOL(__strlen_user_asm)
#endif
@@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm)
and v0, a1
bnez v0, .Lfault\@
FEXPORT(__strncpy_from_\func\()_nocheck_asm)
move t0, zero
move v1, a1
.ifeqs "\func","kernel"
@@ -70,16 +69,12 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm)
#ifndef CONFIG_EVA
/* Set aliases */
.global __strncpy_from_user_asm
.global __strncpy_from_user_nocheck_asm
.set __strncpy_from_user_asm, __strncpy_from_kernel_asm
.set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm
EXPORT_SYMBOL(__strncpy_from_user_asm)
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm)
#endif
__BUILD_STRNCPY_ASM kernel
EXPORT_SYMBOL(__strncpy_from_kernel_asm)
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm)
#ifdef CONFIG_EVA
.set push
@@ -87,5 +82,4 @@ EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm)
__BUILD_STRNCPY_ASM user
.set pop
EXPORT_SYMBOL(__strncpy_from_user_asm)
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm)
#endif
@@ -32,7 +32,6 @@ LEAF(__strnlen_\func\()_asm)
and v0, a0
bnez v0, .Lfault\@
FEXPORT(__strnlen_\func\()_nocheck_asm)
move v0, a0
PTR_ADDU a1, a0 # stop pointer
1:
@@ -68,16 +67,12 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
#ifndef CONFIG_EVA
/* Set aliases */
.global __strnlen_user_asm
.global __strnlen_user_nocheck_asm
.set __strnlen_user_asm, __strnlen_kernel_asm
.set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm
EXPORT_SYMBOL(__strnlen_user_asm)
EXPORT_SYMBOL(__strnlen_user_nocheck_asm)
#endif
__BUILD_STRNLEN_ASM kernel
EXPORT_SYMBOL(__strnlen_kernel_asm)
EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm)
#ifdef CONFIG_EVA
@@ -86,5 +81,4 @@ EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm)
__BUILD_STRNLEN_ASM user
.set pop
EXPORT_SYMBOL(__strnlen_user_asm)
EXPORT_SYMBOL(__strnlen_user_nocheck_asm)
#endif
@@ -290,9 +290,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
......
@@ -23,7 +23,6 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(strnlen_user);
......
@@ -49,14 +49,6 @@ do { \
: "memory", "cc"); \
} while (0)
long
__strncpy_from_user(char *dst, const char *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
}
long
strncpy_from_user(char *dst, const char *src, long count)
{
......
@@ -264,7 +264,6 @@ clear_user(void *addr, unsigned long size)
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_OPENRISC_UACCESS_H */
@@ -209,7 +209,6 @@ extern long lstrnlen_user(const char __user *, long);
#define user_addr_max() (~0UL)
#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user
......
@@ -340,7 +340,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* _ARCH_POWERPC_UACCESS_H */
@@ -276,23 +276,6 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
return __strnlen_user(src, n);
}
/**
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
*
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
#define strlen_user(str) strnlen_user(str, ~0UL)
/*
* Zero Userspace
*/
......
@@ -359,12 +359,6 @@ static inline int strncpy_from_user(char *dst, const char *src, long len)
return -EFAULT;
}
extern int __strlen_user(const char *src);
static inline long strlen_user(const char __user *src)
{
return __strlen_user(src);
}
extern int __strnlen_user(const char *str, long len);
static inline long strnlen_user(const char __user *str, long len)
{
......
@@ -103,34 +103,6 @@ ENTRY(__strnlen_user)
.word 1b, 99b
.previous
.align 2
ENTRY(__strlen_user)
0: lb r6, [r4]
mv r7, r4
extsb r6, r6
cmpi.c r6, 0
mv r4, r6
beq .L27
.L28:
1: lb r6, [r7, 1]+
addi r6, 1
cmpi.c r6, 0
bne .L28
.L27:
br r3
.section .fixup, "ax"
ldi r4, 0x0
br r3
99:
ldi r4, 0
br r3
.previous
.section __ex_table, "a"
.align 2
.word 0b ,99b
.word 1b ,99b
.previous
.align 2
ENTRY(__copy_tofrom_user)
cmpi.c r6, 0
......
@@ -100,7 +100,6 @@ struct __large_struct { unsigned long buf[100]; };
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
/* Generic arbitrary sized copy. */
......
@@ -277,7 +277,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
return n;
}
__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASM_UACCESS_H */
@@ -194,7 +194,6 @@ unsigned long __must_check __clear_user(void __user *, unsigned long);
#define clear_user __clear_user
__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
struct pt_regs;
......
@@ -327,7 +327,6 @@ extern unsigned long raw_copy_in_user(
extern long strnlen_user(const char __user *str, long n);
extern long strlen_user(const char __user *str);
extern long strncpy_from_user(char *dst, const char __user *src, long);
/**
......
@@ -255,11 +255,6 @@ int clear_user_proc(void __user *buf, int size)
return clear_user(buf, size);
}
int strlen_user_proc(char __user *str)
{
return strlen_user(str);
}
int cpu(void)
{
return current_thread_info()->cpu;
......
@@ -565,7 +565,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
......
@@ -278,19 +278,15 @@ clear_user(void *addr, unsigned long size)
extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
return __strncpy_from_user(dst, src, count);
return __strncpy_user(dst, src, count);
return -EFAULT;
}
#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)
/*
* Return the size of a string (including the ending 0!)
*/
......
@@ -433,12 +433,7 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
case KDSKBSENT:
if (!perm)
return -EPERM;
len = strnlen_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
if (!len)
return -EFAULT;
if (len > sizeof(u_kbs->kb_string))
return -EINVAL;
p = memdup_user_nul(u_kbs->kb_string, len);
p = strndup_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
if (IS_ERR(p))
return PTR_ERR(p);
kfree(kbd->func_table[kb_func]);
......
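
The keyboard.c hunk above is the one caller-side cleanup in the series: the open-coded strnlen_user() check plus memdup_user_nul() collapses into a single strndup_user() call. The same pattern in isolation (a sketch, not the driver code; the helper name is invented):

    #include <linux/string.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    /* Hypothetical helper: duplicate a bounded, NUL-terminated user string. */
    static char *copy_user_string(const char __user *ustr, long max)
    {
        char *p = strndup_user(ustr, max);  /* kmalloc()ed, NUL-terminated */

        if (IS_ERR(p))      /* ERR_PTR(-EFAULT) on fault,
                               ERR_PTR(-EINVAL) if longer than max */
            return p;

        /* ... use p; the caller eventually kfree()s it ... */
        return p;
    }
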
@@ -200,11 +200,6 @@ static inline long strnlen_user(const char __user *src, long n)
return __strnlen_user(src, n);
}
static inline long strlen_user(const char __user *src)
{
return strnlen_user(src, 32767);
}
/*
* Zero Userspace
*/
......
@@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count)
return 0;
}
EXPORT_SYMBOL(strnlen_user);
/**
* strlen_user: - Get the size of a user string INCLUDING final NUL.
* @str: The string to measure.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Get the size of a NUL-terminated string in user space.
*
* Returns the size of the string INCLUDING the terminating NUL.
* On exception, returns 0.
*
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
long strlen_user(const char __user *str)
{
unsigned long max_addr, src_addr;
max_addr = user_addr_max();
src_addr = (unsigned long)str;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
user_access_begin();
retval = do_strnlen_user(str, ~0ul, max);
user_access_end();
return retval;
}
return 0;
}
EXPORT_SYMBOL(strlen_user);
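
As the removed generic implementation shows, strlen_user() was behaviourally just strnlen_user() with the limit maxed out, which is why dropping it costs callers nothing. Any out-of-tree straggler can be converted mechanically; a one-line local stand-in (purely illustrative, mirroring the defines several architectures carried) would be:

    /* Drop-in for the removed interface: bound the scan by LONG_MAX instead. */
    #define my_strlen_user(str)     strnlen_user((str), ~0UL >> 1)
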