/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}

/* Even with __builtin_, the compiler may still decide to use the
   out-of-line function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
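
/*
 * Illustrative expansion (not part of this header): with the macro
 * above, only large compile-time-constant copies go out of line:
 *
 *	memcpy(dst, src, 128);	- constant >= 64, calls __memcpy()
 *	memcpy(dst, src, 16);	- constant < 64, __builtin_memcpy()
 *	memcpy(dst, src, n);	- not constant, __builtin_memcpy()
 */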
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
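
/*
 * Illustrative use of the sized variants above (not part of this
 * header); note that n counts elements, not bytes, matching the
 * "rep stos" repeat count:
 *
 *	u16 line[640];
 *	memset16(line, 0xf800, ARRAY_SIZE(line));
 *
 * fills 640 u16 slots (1280 bytes) with one RGB565 pixel value.
 */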

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
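
/*
 * Illustrative (not part of this header): a file is typically excluded
 * from instrumentation in its Makefile, e.g. "KASAN_SANITIZE_slub.o := n";
 * the compiler then does not define __SANITIZE_ADDRESS__ and the
 * wrappers above take effect.
 */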

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low-level memory copy function that catches machine checks.
 *
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
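
/*
 * Illustrative caller pattern (not part of this header): the return
 * value is the number of bytes left uncopied, not an errno, so a
 * caller checks it against zero:
 *
 *	unsigned long rem = memcpy_mcsafe(dst, src, len);
 *
 *	if (rem)
 *		return -EIO;
 *
 * where len - rem bytes were copied before the machine check hit.
 */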

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */