/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

/*
 * We build a jump to memcpy_orig by default which gets NOPped out on
 * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
 * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
 * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
 */

.weak memcpy

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
ENTRY(__memcpy)
ENTRY(memcpy)
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS

	movq %rdi, %rax
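	/*
	 * REP_GOOD path (the ALTERNATIVE_2 above NOPs out the jump to
	 * memcpy_orig): %rax now holds the original destination for the
	 * return value; copy %rdx / 8 qwords with REP MOVSQ, then the
	 * remaining %rdx % 8 bytes with REP MOVSB.
	 */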
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)

/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
ENTRY(memcpy_erms)
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
ENDPROC(memcpy_erms)

ENTRY(memcpy_orig)
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * We check whether memory false dependence could occur,
	 * then jump to the corresponding copy mode.
	 */
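	/*
	 * Only the low byte of each pointer is compared below, as a cheap
	 * heuristic: if the source's low byte is (signed) below the
	 * destination's, copy backward instead, which sidesteps the
	 * partial-overlap pattern that can trigger the false dependence
	 * mentioned above.
	 */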
	cmp %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
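	/*
	 * %rdx stays biased down by 0x20 from here on: the "jae" at the
	 * bottom of the loop keeps iterating only while at least another
	 * 32-byte block remains, and the bias is removed again by the
	 * "addl $0x20, %edx" after the loop.
	 */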
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPs in the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
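	/*
	 * The head and tail pairs may overlap for lengths below 32 bytes;
	 * that is harmless because all four loads complete before any of
	 * the stores.
	 */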
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
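	/*
	 * The movzbl below does not modify flags, so the jz still tests
	 * the subl above: ZF set means the count was exactly one byte.
	 */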
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
ENDPROC(memcpy_orig)

#ifndef CONFIG_UML

MCSAFE_TEST_CTL

/*
 * __memcpy_mcsafe - memory copy with machine check exception handling
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 */
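/*
 * Input:  rdi destination, rsi source, rdx count
 * Output: rax is zero on success; if a read (or write) faults, the
 *         fixup code below arranges for the number of bytes not
 *         copied to be returned instead.
 */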
ENTRY(__memcpy_mcsafe)
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words

	/* Check for bad alignment of source */
	testl $7, %esi
	/* Already aligned */
	jz .L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl %esi, %ecx
	andl $7, %ecx
	subl $8, %ecx
	negl %ecx
	subl %ecx, %edx
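	/*
	 * Here %ecx = 8 - (%esi & 7), the number of leading bytes needed
	 * to reach alignment, and %edx has already been reduced by it.
	 */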
.L_read_leading_bytes:
	movb (%rsi), %al
	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_leading_bytes

.L_8byte_aligned:
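	/* Split the rest: %ecx = whole 8-byte words, %edx = trailing bytes */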
	movl %edx, %ecx
	andl $7, %edx
	shrl $3, %ecx
	jz .L_no_whole_words

.L_read_words:
	movq (%rsi), %r8
	MCSAFE_TEST_SRC %rsi 8 .E_read_words
	MCSAFE_TEST_DST %rdi 8 .E_write_words
.L_write_words:
	movq %r8, (%rdi)
	addq $8, %rsi
	addq $8, %rdi
	decl %ecx
	jnz .L_read_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl %edx, %edx
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
	movl %edx, %ecx
.L_read_trailing_bytes:
	movb (%rsi), %al
	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_trailing_bytes

	/* Copy successful. Return zero */
.L_done_memcpy_trap:
	xorl %eax, %eax
.L_done:
	ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)

	.section .fixup, "ax"
	/*
	 * Return number of bytes not copied for any failure. Note that
	 * there is no "tail" handling since the source buffer is 8-byte
	 * aligned and poison is cacheline aligned.
	 */
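	/*
	 * The three labels below fall through into one another:
	 * .E_read_words converts the remaining word count in %ecx to
	 * bytes, .E_leading_bytes adds the byte count still tracked in
	 * %edx, and .E_trailing_bytes returns the resulting total in
	 * %eax.
	 */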
.E_read_words:
	shll $3, %ecx
.E_leading_bytes:
	addl %edx, %ecx
.E_trailing_bytes:
	mov %ecx, %eax
	jmp .L_done

	/*
	 * For write fault handling, given the destination is unaligned,
	 * we handle faults on multi-byte writes with a byte-by-byte
	 * copy up to the write-protected page.
	 */
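	/*
	 * %rsi/%rdi still point at the word being copied; the remaining
	 * byte count is recomputed into %edx and mcsafe_handle_tail then
	 * redoes the copy byte by byte.
	 */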
.E_write_words:
	shll $3, %ecx
	addl %edx, %ecx
	movl %ecx, %edx
	jmp mcsafe_handle_tail

	.previous

	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE(.L_write_words, .E_write_words)
	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif