2016-07-11 23:36:41 +08:00
|
|
|
/* Copyright 2002 Andi Kleen */
|
|
|
|
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <asm/errno.h>
|
|
|
|
#include <asm/cpufeatures.h>
|
2018-07-30 23:26:54 +08:00
|
|
|
#include <asm/mcsafe_test.h>
|
2016-07-11 23:36:41 +08:00
|
|
|
#include <asm/alternative-asm.h>
|
tools/headers: Synchronize kernel ABI headers
After the SPDX license tags were added a number of tooling headers got out of
sync with their kernel variants, generating lots of build warnings.
Sync them:
- tools/arch/x86/include/asm/disabled-features.h,
tools/arch/x86/include/asm/required-features.h,
tools/include/linux/hash.h:
Remove the SPDX tag where the kernel version does not have it.
- tools/include/asm-generic/bitops/__fls.h,
tools/include/asm-generic/bitops/arch_hweight.h,
tools/include/asm-generic/bitops/const_hweight.h,
tools/include/asm-generic/bitops/fls.h,
tools/include/asm-generic/bitops/fls64.h,
tools/include/uapi/asm-generic/ioctls.h,
tools/include/uapi/asm-generic/mman-common.h,
tools/include/uapi/sound/asound.h,
tools/include/uapi/linux/kvm.h,
tools/include/uapi/linux/perf_event.h,
tools/include/uapi/linux/sched.h,
tools/include/uapi/linux/vhost.h,
tools/include/uapi/sound/asound.h:
Add the SPDX tag of the respective kernel header.
- tools/include/uapi/linux/bpf_common.h,
tools/include/uapi/linux/fcntl.h,
tools/include/uapi/linux/hw_breakpoint.h,
tools/include/uapi/linux/mman.h,
tools/include/uapi/linux/stat.h,
Change the tag to the kernel header version:
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
Also sync other header details:
- include/uapi/sound/asound.h:
Fix pointless end of line whitespace noise the header grew in this cycle.
- tools/arch/x86/lib/memcpy_64.S:
Sync the code and add tools/include/asm/export.h with dummy wrappers
to support building the kernel side code in a tooling header environment.
- tools/include/uapi/asm-generic/mman.h,
tools/include/uapi/linux/bpf.h:
Sync other details that don't impact tooling's use of the ABIs.
Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-11-03 19:18:37 +08:00
|
|
|
#include <asm/export.h>
|
2016-07-11 23:36:41 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We build a jump to memcpy_orig by default which gets NOPped out on
|
|
|
|
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
|
|
|
|
* have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
|
|
|
|
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
|
|
|
|
*/
|
|
|
|
|
|
|
|
.weak memcpy
|
|
|
|
|
|
|
|
/*
|
|
|
|
* memcpy - Copy a memory block.
|
|
|
|
*
|
|
|
|
* Input:
|
|
|
|
* rdi destination
|
|
|
|
* rsi source
|
|
|
|
* rdx count
|
|
|
|
*
|
|
|
|
* Output:
|
|
|
|
* rax original destination
|
|
|
|
*/
|
|
|
|
ENTRY(__memcpy)
|
|
|
|
ENTRY(memcpy)
|
|
|
|
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
|
|
|
|
"jmp memcpy_erms", X86_FEATURE_ERMS
|
|
|
|
|
|
|
|
movq %rdi, %rax
|
|
|
|
movq %rdx, %rcx
|
|
|
|
shrq $3, %rcx
|
|
|
|
andl $7, %edx
|
|
|
|
rep movsq
|
|
|
|
movl %edx, %ecx
|
|
|
|
rep movsb
|
|
|
|
ret
|
|
|
|
ENDPROC(memcpy)
|
|
|
|
ENDPROC(__memcpy)
|
tools/headers: Synchronize kernel ABI headers
After the SPDX license tags were added a number of tooling headers got out of
sync with their kernel variants, generating lots of build warnings.
Sync them:
- tools/arch/x86/include/asm/disabled-features.h,
tools/arch/x86/include/asm/required-features.h,
tools/include/linux/hash.h:
Remove the SPDX tag where the kernel version does not have it.
- tools/include/asm-generic/bitops/__fls.h,
tools/include/asm-generic/bitops/arch_hweight.h,
tools/include/asm-generic/bitops/const_hweight.h,
tools/include/asm-generic/bitops/fls.h,
tools/include/asm-generic/bitops/fls64.h,
tools/include/uapi/asm-generic/ioctls.h,
tools/include/uapi/asm-generic/mman-common.h,
tools/include/uapi/sound/asound.h,
tools/include/uapi/linux/kvm.h,
tools/include/uapi/linux/perf_event.h,
tools/include/uapi/linux/sched.h,
tools/include/uapi/linux/vhost.h,
tools/include/uapi/sound/asound.h:
Add the SPDX tag of the respective kernel header.
- tools/include/uapi/linux/bpf_common.h,
tools/include/uapi/linux/fcntl.h,
tools/include/uapi/linux/hw_breakpoint.h,
tools/include/uapi/linux/mman.h,
tools/include/uapi/linux/stat.h,
Change the tag to the kernel header version:
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
Also sync other header details:
- include/uapi/sound/asound.h:
Fix pointless end of line whitespace noise the header grew in this cycle.
- tools/arch/x86/lib/memcpy_64.S:
Sync the code and add tools/include/asm/export.h with dummy wrappers
to support building the kernel side code in a tooling header environment.
- tools/include/uapi/asm-generic/mman.h,
tools/include/uapi/linux/bpf.h:
Sync other details that don't impact tooling's use of the ABIs.
Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-11-03 19:18:37 +08:00
|
|
|
/* Export both names for module use (asm/export.h supplies dummy wrappers in tools builds). */
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
|
2016-07-11 23:36:41 +08:00
|
|
|
|
|
|
|
/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 *
 * Same register contract as memcpy: rdi = dest, rsi = src, rdx = count;
 * returns the original destination in rax.
 */
ENTRY(memcpy_erms)
	movq %rdi, %rax		/* return value: original destination */
	movq %rdx, %rcx		/* rep count is taken from rcx */
	rep movsb		/* on ERMS CPUs a single rep movsb is fast for all sizes */
	ret
ENDPROC(memcpy_erms)
|
|
|
|
|
|
|
|
/*
 * memcpy_orig - fallback memcpy for CPUs without REP_GOOD/ERMS.
 * rdi = dest, rsi = src, rdx = count; returns original dest in rax.
 * Copies >= 0x20 bytes in 4x8-byte unrolled blocks (forward or backward),
 * then dispatches the remaining tail by size class.
 */
ENTRY(memcpy_orig)
	movq %rdi, %rax		/* return value: original destination */

	cmpq $0x20, %rdx
	jb .Lhandle_tail	/* small copy: tail handling only */

	/*
	 * We check whether memory false dependence could occur,
	 * then jump to corresponding copy mode.
	 * (Only the low bytes of the pointers are compared — a cheap
	 * heuristic on the addresses' low bits, not a full overlap test.)
	 */
	cmp  %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx	/* pre-bias so the loop's subq sets CF on the last pass */
.Lcopy_forward_loop:
	subq $0x20, %rdx	/* flags from this subq survive the movs/leas below */

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi	/* lea advances the pointer without touching flags */

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae  .Lcopy_forward_loop	/* continue while the subq above did not borrow */
	addl $0x20, %edx	/* undo bias: edx = remaining tail bytes */
	jmp  .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPS in the same 16 bytes trunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx	/* same flag-carrying trick as the forward loop */
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae  .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb   .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 * Two possibly-overlapping 8-byte pairs: head and tail.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb   .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb   .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx		/* sets ZF if count was 1, CF (jb) if count was 0 */
	jb .Lend
	/*
	 * Move data from 1 bytes to 3 bytes.
	 */
	movzbl (%rsi), %ecx
	jz .Lstore_1byte	/* ZF still from the subl: exactly one byte */
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)

.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
ENDPROC(memcpy_orig)
|
|
|
|
|
|
|
|
#ifndef CONFIG_UML

/* Injects test hooks for the MCSAFE_TEST_* macros below (asm/mcsafe_test.h). */
MCSAFE_TEST_CTL

/*
 * __memcpy_mcsafe - memory copy with machine check exception handling
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 *
 * rdi = dest, rsi = src, rdx = count.
 * Returns 0 in rax on success; on a fault the .fixup handlers below
 * return the number of bytes NOT copied.
 */
ENTRY(__memcpy_mcsafe)
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words

	/* Check for bad alignment of source */
	testl $7, %esi
	/* Already aligned */
	jz .L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl %esi, %ecx
	andl $7, %ecx
	subl $8, %ecx
	negl %ecx		/* ecx = 8 - (rsi & 7) = leading bytes to align */
	subl %ecx, %edx
.L_read_leading_bytes:
	movb (%rsi), %al
	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_leading_bytes

.L_8byte_aligned:
	movl %edx, %ecx
	andl $7, %edx		/* edx = trailing byte count */
	shrl $3, %ecx		/* ecx = whole-word count */
	jz .L_no_whole_words

.L_read_words:
	movq (%rsi), %r8
	MCSAFE_TEST_SRC %rsi 8 .E_read_words
	MCSAFE_TEST_DST %rdi 8 .E_write_words
.L_write_words:
	movq %r8, (%rdi)
	addq $8, %rsi
	addq $8, %rdi
	decl %ecx
	jnz .L_read_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl %edx, %edx
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
	movl %edx, %ecx
.L_read_trailing_bytes:
	movb (%rsi), %al
	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_trailing_bytes

	/* Copy successful. Return zero */
.L_done_memcpy_trap:
	xorl %eax, %eax
	ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)

	.section .fixup, "ax"
	/*
	 * Return number of bytes not copied for any failure. Note that
	 * there is no "tail" handling since the source buffer is 8-byte
	 * aligned and poison is cacheline aligned.
	 * The handlers fall through intentionally: words-remaining is
	 * scaled to bytes, then the trailing-byte count is added in.
	 */
.E_read_words:
	shll	$3, %ecx	/* words remaining -> bytes remaining */
.E_leading_bytes:
	addl	%edx, %ecx	/* add trailing/pending byte count */
.E_trailing_bytes:
	mov	%ecx, %eax	/* return bytes not copied */
	ret

	/*
	 * For write fault handling, given the destination is unaligned,
	 * we handle faults on multi-byte writes with a byte-by-byte
	 * copy up to the write-protected page.
	 */
.E_write_words:
	shll	$3, %ecx
	addl	%edx, %ecx
	movl	%ecx, %edx
	jmp	mcsafe_handle_tail	/* tail call: C helper finishes byte-wise */

	.previous

	/* Reads may take machine-check faults; writes take ordinary page faults. */
	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE(.L_write_words, .E_write_words)
	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif
|