Commit 30c44659 authored by Linus Torvalds

Merge branch 'strscpy' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull strscpy string copy function implementation from Chris Metcalf.

Chris sent this during the merge window, but I waffled back and forth on
the pull request, which is why it's going in only now.

The new "strscpy()" function is definitely easier to use and more secure
than either strncpy() or strlcpy(), both of which are horrible nasty
interfaces that have serious and irredeemable problems.

strncpy() has a useless return value, and doesn't NUL-terminate an
overlong result.  To make matters worse, it pads a short result with
zeroes, which is a performance disaster if you have big buffers.

strlcpy(), by contrast, is a mis-designed "fix" for strncpy(), lacking
the insane NUL padding, but having a differently broken return value
which returns the original length of the source string.  That means it
will read characters past the count from the source buffer, and you
have to trust the source to be properly terminated.  It also makes
error handling fragile, since the test for overflow is unnecessarily
subtle.
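
For illustration, here is a minimal userspace sketch of the checks those
two interfaces push onto the caller.  The buffer and string are invented,
and strlcpy() appears only as a commented idiom since it is not part of
standard C:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char dst[8];			/* hypothetical small buffer */
		const char *src = "longer than eight bytes";

		/* strncpy(): returns dst (useless for error checks) and does
		 * not NUL-terminate when src doesn't fit, so the caller must
		 * check and terminate by hand. */
		strncpy(dst, src, sizeof(dst) - 1);
		dst[sizeof(dst) - 1] = '\0';

		/* strlcpy() idiom (where the function exists): the only way
		 * to detect truncation is to compare the returned *source*
		 * length against the buffer size, and computing that length
		 * already required reading src past the copy limit:
		 *
		 *	if (strlcpy(dst, src, sizeof(dst)) >= sizeof(dst))
		 *		... truncated ...
		 */
		printf("%s\n", dst);
		return 0;
	}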

strscpy() avoids both these problems, guaranteeing the NUL termination
(but not excessive padding) if the destination size wasn't zero, and
making the overflow condition very obvious by returning -E2BIG.  It also
doesn't read past the size of the source, and can thus be used for
untrusted source data too.
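
The intended calling pattern looks roughly like the sketch below.  The
helper and its names are illustrative only, not something added by this
merge:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Illustrative helper, not part of this merge: copy an untrusted
	 * NUL-terminated name into a fixed-size buffer. */
	static int copy_name(char *dst, size_t dst_size, const char *untrusted_src)
	{
		ssize_t len = strscpy(dst, untrusted_src, dst_size);

		if (len < 0)	/* -E2BIG: src did not fit; dst is still NUL-terminated */
			return len;

		/* On success, len is the number of characters copied, excluding the NUL. */
		return 0;
	}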

So why did I waffle about this for so long?

Every time we introduce a new-and-improved interface, people start doing
these interminable series of trivial conversion patches.

And every time that happens, somebody makes some silly mistake, and the
conversion patch to the improved interface actually makes things worse.
Because the patch is mind-numbing and trivial, nobody has the attention
span to look at it carefully, and it's usually done over large swaths
of source code, which means that not every conversion gets tested.

So I'm pulling the strscpy() support because it *is* a better interface.
But I will refuse to pull mindless conversion patches.  Use this in
places where it makes sense, but don't do trivial patches to fix things
that aren't actually known to be broken.

* 'strscpy' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: use global strscpy() rather than private copy
  string: provide strscpy()
  Make asm/word-at-a-time.h available on all architectures
@@ -48,4 +48,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -20,4 +20,5 @@ generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -46,4 +46,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -59,4 +59,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -43,4 +43,5 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
@@ -58,4 +58,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
@@ -9,3 +9,4 @@ generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
@@ -54,4 +54,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
@@ -17,4 +17,5 @@ generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
@@ -61,4 +61,5 @@ generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
@@ -13,3 +13,4 @@ generic-y += sections.h
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += serial.h
+generic-y += word-at-a-time.h
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/string.h>
 #include <gxio/iorpc_globals.h>
 #include <gxio/iorpc_mpipe.h>
@@ -29,32 +30,6 @@
 /* HACK: Avoid pointless "shadow" warnings. */
 #define link link_shadow
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough.  To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
-	size_t len = strnlen(src, size) + 1;
-	if (len > size) {
-		if (size)
-			dest[0] = '\0';
-		return 0;
-	}
-	memcpy(dest, src, len);
-	return len;
-}
 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 {
 	char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
 	if (!context)
 		return GXIO_ERR_NO_DEVICE;
-	if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+	if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
 		return GXIO_ERR_NO_DEVICE;
 	return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
 	if (rv >= 0) {
-		if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+		if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
 			return GXIO_ERR_INVAL_MEMORY_SIZE;
 		memcpy(link_mac, mac.mac, sizeof(mac.mac));
 	}
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
 	_gxio_mpipe_link_name_t name;
 	int rv;
-	if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+	if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
 		return GXIO_ERR_NO_DEVICE;
 	rv = gxio_mpipe_link_open_aux(context, name, flags);
......
@@ -40,4 +40,5 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += types.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -25,4 +25,5 @@ generic-y += preempt.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -62,4 +62,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -28,4 +28,5 @@ generic-y += statfs.h
 generic-y += termios.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
/*
* This says "generic", but it's actually big-endian only.
* Little-endian can use more efficient versions of these
* interfaces, see for example
* arch/x86/include/asm/word-at-a-time.h
* for those.
*/
#include <linux/kernel.h>
#include <asm/byteorder.h>
#ifdef __BIG_ENDIAN

struct word_at_a_time {
	const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif

#else

/*
 * The optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

#ifdef CONFIG_64BIT

/*
 * Jan Achrenius on G+: microoptimized version of
 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 * that works for the bytemasks without having to
 * mask them first.
 */
static inline long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}

#else /* 32-bit case */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}

#endif

/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}

#endif /* __BIG_ENDIAN */

#endif /* _ASM_WORD_AT_A_TIME_H */
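
To make the little-endian mask arithmetic above concrete, here is a small
standalone sketch that mirrors the has_zero()/create_zero_mask()/find_zero()
sequence with the same constants.  It assumes a 64-bit little-endian machine,
and the test string is invented:

	#include <inttypes.h>
	#include <stdio.h>
	#include <string.h>

	#define ONE_BITS  UINT64_C(0x0101010101010101)	/* REPEAT_BYTE(0x01) */
	#define HIGH_BITS UINT64_C(0x8080808080808080)	/* REPEAT_BYTE(0x80) */

	int main(void)
	{
		uint64_t word, mask, zmask, index;

		/* Load "abc\0zzzz" the way strscpy() loads one word of src. */
		memcpy(&word, "abc\0zzzz", sizeof(word));

		/* has_zero(): only a byte that was 0x00 keeps its high bit set. */
		mask = ((word - ONE_BITS) & ~word) & HIGH_BITS;
		if (mask) {
			/* create_zero_mask(): 0xff in every byte below the NUL. */
			zmask = ((mask - 1) & ~mask) >> 7;
			/* find_zero() via count_masked_bytes(), 64-bit variant. */
			index = zmask * UINT64_C(0x0001020304050608) >> 56;
			printf("NUL at byte %" PRIu64 "\n", index);	/* prints 3 */
		}
		return 0;
	}
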
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
 #ifndef __HAVE_ARCH_STRLCPY
 size_t strlcpy(char *, const char *, size_t);
 #endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
 #ifndef __HAVE_ARCH_STRCAT
 extern char * strcat(char *, const char *);
 #endif
......
@@ -27,6 +27,10 @@
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
#include <asm/page.h>
#ifndef __HAVE_ARCH_STRNCASECMP
/**
* strncasecmp - Case insensitive, length-limited string comparison
@@ -146,6 +150,90 @@ size_t strlcpy(char *dest, const char *src, size_t size)
EXPORT_SYMBOL(strlcpy);
#endif
#ifndef __HAVE_ARCH_STRSCPY
/**
 * strscpy - Copy a C-string into a sized buffer
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @count: Size of destination buffer
 *
 * Copy the string, or as much of it as fits, into the dest buffer.
 * The routine returns the number of characters copied (not including
 * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
 * The behavior is undefined if the string buffers overlap.
 * The destination buffer is always NUL terminated, unless it's zero-sized.
 *
 * Preferred to strlcpy() since the API doesn't require reading memory
 * from the src string beyond the specified "count" bytes, and since
 * the return value is easier to error-check than strlcpy()'s.
 * In addition, the implementation is robust to the string changing out
 * from underneath it, unlike the current strlcpy() implementation.
 *
 * Preferred to strncpy() since it always returns a valid string, and
 * doesn't unnecessarily force the tail of the destination buffer to be
 * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
 * with an overflow test, then just memset() the tail of the dest buffer.
 */
ssize_t strscpy(char *dest, const char *src, size_t count)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	size_t max = count;
	long res = 0;

	if (count == 0)
		return -E2BIG;

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/*
	 * If src is unaligned, don't cross a page boundary,
	 * since we don't know if the next page is mapped.
	 */
	if ((long)src & (sizeof(long) - 1)) {
		size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
		if (limit < max)
			max = limit;
	}
#else
	/* If src or dest is unaligned, don't do word-at-a-time. */
	if (((long) dest | (long) src) & (sizeof(long) - 1))
		max = 0;
#endif

	while (max >= sizeof(unsigned long)) {
		unsigned long c, data;

		c = *(unsigned long *)(src+res);
		*(unsigned long *)(dest+res) = c;
		if (has_zero(c, &data, &constants)) {
			data = prep_zero_mask(c, data, &constants);
			data = create_zero_mask(data);
			return res + find_zero(data);
		}
		res += sizeof(unsigned long);
		count -= sizeof(unsigned long);
		max -= sizeof(unsigned long);
	}

	while (count) {
		char c;

		c = src[res];
		dest[res] = c;
		if (!c)
			return res;
		res++;
		count--;
	}

	/* Hit buffer length without finding a NUL; force NUL-termination. */
	if (res)
		dest[res-1] = '\0';

	return -E2BIG;
}
EXPORT_SYMBOL(strscpy);
#endif
#ifndef __HAVE_ARCH_STRCAT
/**
* strcat - Append one %NUL-terminated string to another
......
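
Where zero-padding of the destination is actually wanted, the strscpy()
kernel-doc above suggests an explicit overflow test followed by a memset()
of the tail instead of strncpy()'s implicit padding.  A minimal sketch of
that pattern, with an invented helper name:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Illustrative only: pad_copy() is not a function added by this merge. */
	static ssize_t pad_copy(char *dst, size_t dst_size, const char *src)
	{
		ssize_t len = strscpy(dst, src, dst_size);

		if (len < 0)		/* -E2BIG: truncated, dst still NUL-terminated */
			return len;

		/* Zero the unused tail explicitly, instead of relying on
		 * strncpy()'s implicit padding. */
		memset(dst + len, 0, dst_size - len);
		return len;
	}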