diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c03e70f3056617d5982540a8cb5b8..e78b8eee66155df85844e8cf6dae4507fa313bc6 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
 	testl $3, %esi		# Check alignment.
 	jz 2f			# Jump if alignment is ok.
 	testl $1, %esi		# Check alignment.
-	jz 10f			# Jump if alignment is boundary of 2bytes.
+	jz 10f			# Jump if alignment is boundary of 2 bytes.
 
 	# buf is odd
 	dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index 20b0eaf14c605c1b078cb2eea4437c15d48a6430..e78761d6b7f87811ea7f20feab53a72656547e1b 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
 	char *ret = dest;
 
 	__asm__ __volatile__(
-		/* Handle more 16bytes in loop */
+		/* Handle more 16 bytes in loop */
 		"cmp $0x10, %0\n\t"
 		"jb 1f\n\t"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97eded64736d60ba10c22ed09f3ceef..56313a3261888d0e7eb866788c24f806acf5d47a 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
 	subq $0x20, %rdx
 	/*
 	 * At most 3 ALU operations in one cycle,
-	 * so append NOPS in the same 16bytes trunk.
+	 * so append NOPS in the same 16 bytes trunk.
 	 */
 	.p2align 4
 .Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index f3d83cbfc831bf1ef86e5291014f0f26b9f2a6b5..65268a6104f45e09d5e45f75cb63edf6527164a0 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@ ENTRY(memmove)
 	CFI_STARTPROC
 
-	/* Handle more 32bytes in loop */
+	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
 	cmp $0x20, %rdx
 	jb 1f
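
For context, the comments touched above all describe the same structure: when the request is at least one chunk (16 bytes in memcpy_32.c's "cmp $0x10" check, 32 bytes in memmove_64.S's "cmp $0x20" check), the routine moves whole chunks per loop iteration and handles the remainder separately, choosing a forward or backward pass so that overlapping buffers are copied safely. The following is only a portable C sketch of that strategy under those assumptions, not the kernel's implementation; the names memmove_sketch and CHUNK are illustrative and do not exist in the tree.

#include <stddef.h>

#define CHUNK 16	/* bytes handled per loop iteration; illustrative only */

static void *memmove_sketch(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d == s || n == 0)
		return dest;

	if (d < s || d >= s + n) {
		/* dest does not overlap the tail of src: copy forward. */
		while (n >= CHUNK) {
			for (int i = 0; i < CHUNK; i++)
				d[i] = s[i];
			d += CHUNK;
			s += CHUNK;
			n -= CHUNK;
		}
		while (n--)			/* tail, byte by byte */
			*d++ = *s++;
	} else {
		/* dest overlaps the end of src: copy backward from the top. */
		d += n;
		s += n;
		while (n >= CHUNK) {
			for (int i = 1; i <= CHUNK; i++)
				d[-i] = s[-i];
			d -= CHUNK;
			s -= CHUNK;
			n -= CHUNK;
		}
		while (n--)
			*--d = *--s;
	}
	return dest;
}

The assembly versions additionally unroll the per-chunk copy across registers and schedule it around the CPU's ALU throughput (the "at most 3 ALU operations in one cycle" comment in memcpy_64.S); the sketch above only shows the chunked-loop-plus-tail shape the comments refer to.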