; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s

; widen v8i8 to v16i8 (checks even power of 2 widening with add & and)
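;
; A rough C equivalent of the IR below (an illustrative sketch using GCC
; vector extensions, reconstructed from the IR rather than taken from the
; original source):
;
;   typedef char v8i8 __attribute__((vector_size(8)));
;   for (int i = 0; i < n; i++) {
;     v8i8 *dst = (v8i8 *)&dst_i[i];
;     v8i8 *src = (v8i8 *)&src_i[i];
;     dst[i] = (src[i] + 1) & 4;   /* element-wise add and mask per byte */
;   }
;
; The per-byte +1 is lowered as a subtract of the all-ones vector that
; pcmpeqd materializes (add X, <1,1,...> == sub X, <-1,-1,...>), which is
; cheaper than loading a splat-1 constant.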
define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
; CHECK-LABEL: update:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: movl $0, (%esp)
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_2: # %forbody
; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: movl (%esp), %eax
; CHECK-NEXT: shll $3, %eax
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl (%esp), %eax
; CHECK-NEXT: shll $3, %eax
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl (%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: pmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; CHECK-NEXT: psubw %xmm0, %xmm3
; CHECK-NEXT: pand %xmm1, %xmm3
; CHECK-NEXT: pshufb %xmm2, %xmm3
; CHECK-NEXT: movq %xmm3, (%edx,%ecx,8)
; CHECK-NEXT: incl (%esp)
; CHECK-NEXT: .LBB0_1: # %forcond
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movl (%esp), %eax
; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: jl .LBB0_2
; CHECK-NEXT: # BB#3: # %afterfor
; CHECK-NEXT: addl $12, %esp
; CHECK-NEXT: retl
entry:
%dst_i.addr = alloca i64*
%src_i.addr = alloca i64*
%n.addr = alloca i32
%i = alloca i32, align 4
%dst = alloca <8 x i8>*, align 4
%src = alloca <8 x i8>*, align 4
store i64* %dst_i, i64** %dst_i.addr
store i64* %src_i, i64** %src_i.addr
store i32 %n, i32* %n.addr
store i32 0, i32* %i
br label %forcond

forcond:
%tmp = load i32, i32* %i
%tmp1 = load i32, i32* %n.addr
%cmp = icmp slt i32 %tmp, %tmp1
br i1 %cmp, label %forbody, label %afterfor

forbody:
%tmp2 = load i32, i32* %i
%tmp3 = load i64*, i64** %dst_i.addr
%arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2
%conv = bitcast i64* %arrayidx to <8 x i8>*
store <8 x i8>* %conv, <8 x i8>** %dst
%tmp4 = load i32, i32* %i
%tmp5 = load i64*, i64** %src_i.addr
%arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4
%conv7 = bitcast i64* %arrayidx6 to <8 x i8>*
store <8 x i8>* %conv7, <8 x i8>** %src
%tmp8 = load i32, i32* %i
%tmp9 = load <8 x i8>*, <8 x i8>** %dst
%arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8
%tmp11 = load i32, i32* %i
%tmp12 = load <8 x i8>*, <8 x i8>** %src
%arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11
%tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13
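; The next two 8 x i8 ops are the point of the test: per the CHECK lines
; above, the bytes are zero-extended to words (pmovzxbw), the +1 is selected
; as a psubw against the pcmpeqd all-ones register, the mask becomes pand,
; and pshufb repacks the low bytes so movq can store the 8 result bytes.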
%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 >
%and = and <8 x i8> %add, < i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4 >
store <8 x i8> %and, <8 x i8>* %arrayidx10
br label %forinc

forinc:
%tmp15 = load i32, i32* %i
%inc = add i32 %tmp15, 1
store i32 %inc, i32* %i
br label %forcond

afterfor:
ret void
}
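; To reproduce the RUN line by hand (with llc and FileCheck from an LLVM
; build on PATH, and %s standing for this file's path, as lit substitutes):
;   llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s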