; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse | FileCheck %s --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse2,-slow-unaligned-mem-16 | FileCheck %s --check-prefix=SSE2FAST
; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; https://llvm.org/bugs/show_bug.cgi?id=27100
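
; The checks below cover three lowering strategies for a non-zero memset:
; the SSE runs (unaligned 16-byte accesses are slow) expand to scalar 8-byte
; stores, the SSE2FAST run uses unaligned 16-byte vector stores, and the
; AVX runs use 32-byte stores once the size allows it.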
define void @memset_16_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_16_nonzero_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT:    movq %rax, 8(%rdi)
; SSE-NEXT:    movq %rax, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_16_nonzero_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX-LABEL: memset_16_nonzero_bytes:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT:    vmovups %xmm0, (%rdi)
; AVX-NEXT:    retq
  %call = tail call i8* @__memset_chk(i8* %x, i32 42, i64 16, i64 -1)
  ret void
}

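; In the SSE runs, the stored pattern 0x2A2A2A2A2A2A2A2A is simply the memset
; value 42 (0x2A) replicated into each of the 8 bytes of %rax.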
define void @memset_32_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_32_nonzero_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT:    movq %rax, 24(%rdi)
; SSE-NEXT:    movq %rax, 16(%rdi)
; SSE-NEXT:    movq %rax, 8(%rdi)
; SSE-NEXT:    movq %rax, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_32_nonzero_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX-LABEL: memset_32_nonzero_bytes:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %call = tail call i8* @__memset_chk(i8* %x, i32 42, i64 32, i64 -1)
  ret void
}

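; Once the AVX runs use 32-byte ymm stores, each function ends with
; vzeroupper before ret to avoid AVX-to-SSE transition penalties.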
define void @memset_64_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_64_nonzero_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT:    movq %rax, 56(%rdi)
; SSE-NEXT:    movq %rax, 48(%rdi)
; SSE-NEXT:    movq %rax, 40(%rdi)
; SSE-NEXT:    movq %rax, 32(%rdi)
; SSE-NEXT:    movq %rax, 24(%rdi)
; SSE-NEXT:    movq %rax, 16(%rdi)
; SSE-NEXT:    movq %rax, 8(%rdi)
; SSE-NEXT:    movq %rax, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_64_nonzero_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX-LABEL: memset_64_nonzero_bytes:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %call = tail call i8* @__memset_chk(i8* %x, i32 42, i64 64, i64 -1)
  ret void
}

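; At 128 bytes the cost of the scalar fallback is clearest: sixteen 8-byte
; movq stores versus eight 16-byte movups or four 32-byte vmovups stores.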
define void @memset_128_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_128_nonzero_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
; SSE-NEXT:    movq %rax, 120(%rdi)
; SSE-NEXT:    movq %rax, 112(%rdi)
; SSE-NEXT:    movq %rax, 104(%rdi)
; SSE-NEXT:    movq %rax, 96(%rdi)
; SSE-NEXT:    movq %rax, 88(%rdi)
; SSE-NEXT:    movq %rax, 80(%rdi)
; SSE-NEXT:    movq %rax, 72(%rdi)
; SSE-NEXT:    movq %rax, 64(%rdi)
; SSE-NEXT:    movq %rax, 56(%rdi)
; SSE-NEXT:    movq %rax, 48(%rdi)
; SSE-NEXT:    movq %rax, 40(%rdi)
; SSE-NEXT:    movq %rax, 32(%rdi)
; SSE-NEXT:    movq %rax, 24(%rdi)
; SSE-NEXT:    movq %rax, 16(%rdi)
; SSE-NEXT:    movq %rax, 8(%rdi)
; SSE-NEXT:    movq %rax, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_128_nonzero_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT:    movups %xmm0, 112(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 96(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 80(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 64(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX-LABEL: memset_128_nonzero_bytes:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT:    vmovups %ymm0, 96(%rdi)
; AVX-NEXT:    vmovups %ymm0, 64(%rdi)
; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %call = tail call i8* @__memset_chk(i8* %x, i32 42, i64 128, i64 -1)
  ret void
}

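; At 256 bytes the SSE configuration stops expanding inline (it would take 32
; scalar stores) and calls the libc memset instead; the vector configurations
; still expand the memset inline.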
define void @memset_256_nonzero_bytes(i8* %x) {
; SSE-LABEL: memset_256_nonzero_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    pushq %rax
; SSE-NEXT:    .cfi_def_cfa_offset 16
; SSE-NEXT:    movl $256, %edx # imm = 0x100
; SSE-NEXT:    movl $42, %esi
; SSE-NEXT:    callq memset
; SSE-NEXT:    popq %rax
; SSE-NEXT:    .cfi_def_cfa_offset 8
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_256_nonzero_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; SSE2FAST-NEXT:    movups %xmm0, 240(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 224(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 208(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 192(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 176(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 160(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 144(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 128(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 112(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 96(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 80(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 64(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX-LABEL: memset_256_nonzero_bytes:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; AVX-NEXT:    vmovups %ymm0, 224(%rdi)
; AVX-NEXT:    vmovups %ymm0, 192(%rdi)
; AVX-NEXT:    vmovups %ymm0, 160(%rdi)
; AVX-NEXT:    vmovups %ymm0, 128(%rdi)
; AVX-NEXT:    vmovups %ymm0, 96(%rdi)
; AVX-NEXT:    vmovups %ymm0, 64(%rdi)
; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX-NEXT:    vmovups %ymm0, (%rdi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %call = tail call i8* @__memset_chk(i8* %x, i32 42, i64 256, i64 -1)
  ret void
}

declare i8* @__memset_chk(i8*, i32, i64, i64)

; Repeat with a non-constant value for the stores.
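; The SSE runs splat the byte into a GPR by zero-extending %c and multiplying
; by 0x0101010101010101 (72340172838076673 in the movabsq below), which
; replicates the low byte into all 8 byte lanes of %rcx.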
define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_16_nonconst_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
; SSE-NEXT:    movzbl %sil, %eax
; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT:    imulq %rax, %rcx
; SSE-NEXT:    movq %rcx, 8(%rdi)
; SSE-NEXT:    movq %rcx, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_16_nonconst_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movd %esi, %xmm0
; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX1-LABEL: memset_16_nonconst_bytes:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: memset_16_nonconst_bytes:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    retq
  tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 16, i1 false)
  ret void
}

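; The vector splat of the byte differs by subtarget: SSE2 widens with
; punpcklbw and then shuffles (pshuflw + pshufd), AVX1 uses vpshufb with an
; all-zeros shuffle mask, and AVX2 has a direct vpbroadcastb. AVX1 also lacks
; a 256-bit integer broadcast, so it builds the ymm value with vinsertf128.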
define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_32_nonconst_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
; SSE-NEXT:    movzbl %sil, %eax
; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT:    imulq %rax, %rcx
; SSE-NEXT:    movq %rcx, 24(%rdi)
; SSE-NEXT:    movq %rcx, 16(%rdi)
; SSE-NEXT:    movq %rcx, 8(%rdi)
; SSE-NEXT:    movq %rcx, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_32_nonconst_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movd %esi, %xmm0
; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX1-LABEL: memset_32_nonconst_bytes:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: memset_32_nonconst_bytes:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 32, i1 false)
  ret void
}

define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_64_nonconst_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
; SSE-NEXT:    movzbl %sil, %eax
; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT:    imulq %rax, %rcx
; SSE-NEXT:    movq %rcx, 56(%rdi)
; SSE-NEXT:    movq %rcx, 48(%rdi)
; SSE-NEXT:    movq %rcx, 40(%rdi)
; SSE-NEXT:    movq %rcx, 32(%rdi)
; SSE-NEXT:    movq %rcx, 24(%rdi)
; SSE-NEXT:    movq %rcx, 16(%rdi)
; SSE-NEXT:    movq %rcx, 8(%rdi)
; SSE-NEXT:    movq %rcx, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_64_nonconst_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movd %esi, %xmm0
; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2FAST-NEXT:    movdqu %xmm0, 48(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 32(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX1-LABEL: memset_64_nonconst_bytes:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: memset_64_nonconst_bytes:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 64, i1 false)
  ret void
}

define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_128_nonconst_bytes:
; SSE:       # %bb.0:
; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
; SSE-NEXT:    movzbl %sil, %eax
; SSE-NEXT:    movabsq $72340172838076673, %rcx # imm = 0x101010101010101
; SSE-NEXT:    imulq %rax, %rcx
; SSE-NEXT:    movq %rcx, 120(%rdi)
; SSE-NEXT:    movq %rcx, 112(%rdi)
; SSE-NEXT:    movq %rcx, 104(%rdi)
; SSE-NEXT:    movq %rcx, 96(%rdi)
; SSE-NEXT:    movq %rcx, 88(%rdi)
; SSE-NEXT:    movq %rcx, 80(%rdi)
; SSE-NEXT:    movq %rcx, 72(%rdi)
; SSE-NEXT:    movq %rcx, 64(%rdi)
; SSE-NEXT:    movq %rcx, 56(%rdi)
; SSE-NEXT:    movq %rcx, 48(%rdi)
; SSE-NEXT:    movq %rcx, 40(%rdi)
; SSE-NEXT:    movq %rcx, 32(%rdi)
; SSE-NEXT:    movq %rcx, 24(%rdi)
; SSE-NEXT:    movq %rcx, 16(%rdi)
; SSE-NEXT:    movq %rcx, 8(%rdi)
; SSE-NEXT:    movq %rcx, (%rdi)
; SSE-NEXT:    retq
;
; SSE2FAST-LABEL: memset_128_nonconst_bytes:
; SSE2FAST:       # %bb.0:
; SSE2FAST-NEXT:    movd %esi, %xmm0
; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2FAST-NEXT:    movdqu %xmm0, 112(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 96(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 80(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 64(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 48(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 32(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
; SSE2FAST-NEXT:    retq
;
; AVX1-LABEL: memset_128_nonconst_bytes:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vmovups %ymm0, 96(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 64(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: memset_128_nonconst_bytes:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, 96(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 64(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 128, i1 false)
  ret void
}

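; For the 256-byte non-constant case, the SSE run tail-calls memset with a
; plain jmp: %rdi and %esi already hold the destination and value arguments,
; so only the length in %edx needs to be materialized.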
define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
|
2016-04-02 02:11:30 +08:00
|
|
|
; SSE-LABEL: memset_256_nonconst_bytes:
|
2017-12-05 01:18:51 +08:00
|
|
|
; SSE: # %bb.0:
|
2016-04-06 01:12:19 +08:00
|
|
|
; SSE-NEXT: movl $256, %edx # imm = 0x100
|
2018-03-10 10:42:14 +08:00
|
|
|
; SSE-NEXT: jmp memset # TAILCALL
|
2016-04-01 04:40:32 +08:00
|
|
|
;
|
[x86] add an SSE2 + fast-unaligned accesses run for memset nonzero tests
Was there really no other way to splat a byte in SSE2?
punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
llvm-svn: 265172
2016-04-02 02:29:25 +08:00
|
|
|
; SSE2FAST-LABEL: memset_256_nonconst_bytes:
|
2017-12-05 01:18:51 +08:00
|
|
|
; SSE2FAST: # %bb.0:
|
2016-04-06 01:12:19 +08:00
|
|
|
; SSE2FAST-NEXT: movd %esi, %xmm0
|
[x86] add an SSE2 + fast-unaligned accesses run for memset nonzero tests
Was there really no other way to splat a byte in SSE2?
punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
llvm-svn: 265172
2016-04-02 02:29:25 +08:00
|
|
|
; SSE2FAST-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
2017-12-29 22:41:50 +08:00
|
|
|
; SSE2FAST-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
|
|
|
|
; SSE2FAST-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
|
[x86] add an SSE2 + fast-unaligned accesses run for memset nonzero tests
Was there really no other way to splat a byte in SSE2?
punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
llvm-svn: 265172
2016-04-02 02:29:25 +08:00
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 240(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 224(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 208(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 192(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 176(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 160(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 144(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 128(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 112(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 96(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 80(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 64(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 48(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 32(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, 16(%rdi)
|
|
|
|
; SSE2FAST-NEXT: movdqu %xmm0, (%rdi)
|
|
|
|
; SSE2FAST-NEXT: retq
|
|
|
|
;
; AVX1-LABEL: memset_256_nonconst_bytes:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vmovups %ymm0, 224(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 192(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 160(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 128(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 96(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 64(%rdi)
; AVX1-NEXT:    vmovups %ymm0, 32(%rdi)
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
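; AVX2 has a dedicated byte broadcast (vpbroadcastb), so the splat is a
; single instruction.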
; AVX2-LABEL: memset_256_nonconst_bytes:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, 224(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 192(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 160(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 128(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 96(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 64(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 256, i1 false)
  ret void
}
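; Note: this is the post-D41675 intrinsic signature. The old i32 alignment
; argument is gone (alignment, when needed, is an attribute on the pointer
; argument), and the trailing i1 is the volatile flag.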
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1