/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>

#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
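
/*
 * For reference, this implements the usual C contract
 *
 *	void *memcpy(void *dest, const void *src, size_t count);
 *
 * with the arguments arriving in %rdi/%rsi/%rdx per the x86-64 calling
 * convention and the original destination returned in %rax.
 */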

/*
 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c:
	movq %rdi, %rax
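	/*
	 * Copy count/8 quadwords with REP MOVSQ, then the remaining
	 * count%8 bytes with REP MOVSB.
	 */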
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
.Lmemcpy_e:
	.previous

/*
 * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than
 * memcpy_c. Use memcpy_c_e when possible.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c_e:
	movq %rdi, %rax
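	/* With ERMS, a single REP MOVSB handles the whole length efficiently. */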
	movq %rdx, %rcx
	rep movsb
	ret
.Lmemcpy_e_e:
	.previous

ENTRY(__memcpy)
ENTRY(memcpy)
	CFI_STARTPROC
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * We check whether a memory false dependence could occur,
	 * then jump to the corresponding copy mode.
	 */
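	/*
	 * Only the low bytes of the pointers are compared: a cheap
	 * heuristic for the case in which the forward loop's loads would
	 * keep hitting bytes its own earlier stores just wrote. If the
	 * source's low byte is below the destination's, copy backward.
	 */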
	cmp  %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
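	/*
	 * Bias the count by 0x20: together with the SUBQ at the top of the
	 * loop, the JAE below it keeps looping exactly while another full
	 * 32-byte block remains. The bias is undone after the loop.
	 */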
.Lcopy_forward_loop:
	subq $0x20,	%rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi),	%r8
	movq 1*8(%rsi),	%r9
	movq 2*8(%rsi),	%r10
	movq 3*8(%rsi),	%r11
	leaq 4*8(%rsi),	%rsi

	movq %r8,	0*8(%rdi)
	movq %r9,	1*8(%rdi)
	movq %r10,	2*8(%rdi)
	movq %r11,	3*8(%rdi)
	leaq 4*8(%rdi),	%rdi
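	/*
	 * LEA does not modify the flags, so the JAE below still tests the
	 * SUBQ at the top of the loop.
	 */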
	jae  .Lcopy_forward_loop
	addl $0x20,	%edx
	jmp  .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx,	%rsi
	addq %rdx,	%rdi
	subq $0x20,	%rdx
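	/* Same 0x20 bias as in the forward path; undone after the loop. */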
	/*
	 * At most 3 ALU operations issue in one cycle,
	 * so append NOPs within the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20,	%rdx
	movq -1*8(%rsi),	%r8
	movq -2*8(%rsi),	%r9
	movq -3*8(%rsi),	%r10
	movq -4*8(%rsi),	%r11
	leaq -4*8(%rsi),	%rsi
	movq %r8,		-1*8(%rdi)
	movq %r9,		-2*8(%rdi)
	movq %r10,		-3*8(%rdi)
	movq %r11,		-4*8(%rdi)
	leaq -4*8(%rdi),	%rdi
	jae  .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20,	%edx
	subq %rdx,	%rsi
	subq %rdx,	%rdi
.Lhandle_tail:
	cmpl $16,	%edx
	jb   .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
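	/*
	 * Load the first 16 bytes and the last 16 bytes; the two pairs may
	 * overlap, which covers every length from 16 to 31 without a loop.
	 */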
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi),	%r9
	movq -2*8(%rsi, %rdx),	%r10
	movq -1*8(%rsi, %rdx),	%r11
	movq %r8,	0*8(%rdi)
	movq %r9,	1*8(%rdi)
	movq %r10,	-2*8(%rdi, %rdx)
	movq %r11,	-1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8,	%edx
	jb   .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
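	/* Overlapping first and last 8-byte loads cover lengths 8 to 15. */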
	movq 0*8(%rsi),	%r8
	movq -1*8(%rsi, %rdx),	%r9
	movq %r8,	0*8(%rdi)
	movq %r9,	-1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4,	%edx
	jb   .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
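	/* Overlapping first and last 4-byte loads cover lengths 4 to 7. */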
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
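	/*
	 * %edx already holds count-1 from the SUBL above. MOVZBL does not
	 * change the flags, so the JZ below still tests that subtraction
	 * and branches for the single-byte case. For 2 or 3 bytes, the
	 * second byte and the last byte (possibly the same one) are stored,
	 * then the first byte.
	 */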
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
	CFI_ENDPROC
ENDPROC(memcpy)
ENDPROC(__memcpy)

	/*
	 * Some CPUs provide the enhanced REP MOVSB/STOSB (ERMS) feature.
	 * If the feature is supported, memcpy_c_e() is the first choice.
	 * If enhanced rep movsb copy is not available, use the fast string
	 * copy memcpy_c() when possible: it is faster and its code is
	 * simpler than the original memcpy().
	 * Otherwise, the original memcpy() is used.
	 * In the .altinstructions section, the ERMS feature is placed after
	 * REP_GOOD so that the patches are applied in the right order.
	 *
	 * Replace only the beginning: memcpy itself is used while the
	 * alternatives are applied, so it would be silly to overwrite it
	 * with NOPs - a reboot would be the only outcome...
	 */
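	/*
	 * Note that both length operands below are the size of the
	 * replacement (.Lmemcpy_e-.Lmemcpy_c resp. .Lmemcpy_e_e-.Lmemcpy_c_e),
	 * so only that many bytes at the start of memcpy are patched.
	 */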
	.section .altinstructions, "a"
	altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
			     .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
	altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
			     .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
	.previous