; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

; Bitcasts between vectors and scalars are valid.
; PR4487
; xor of two identical bitcasts of the same value folds to zero.
define i32 @test1(i64 %a) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret i32 0
;
  %t1 = bitcast i64 %a to <2 x i32>
  %t2 = bitcast i64 %a to <2 x i32>
  %t3 = xor <2 x i32> %t1, %t2
  %t4 = extractelement <2 x i32> %t3, i32 0
  ret i32 %t4
}
; Perform the bitwise logic in the source type of the operands to eliminate bitcasts.

define <2 x i32> @xor_two_vector_bitcasts(<1 x i64> %a, <1 x i64> %b) {
; CHECK-LABEL: @xor_two_vector_bitcasts(
; CHECK-NEXT:    [[T31:%.*]] = xor <1 x i64> %a, %b
; CHECK-NEXT:    [[T3:%.*]] = bitcast <1 x i64> [[T31]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[T3]]
;
  %t1 = bitcast <1 x i64> %a to <2 x i32>
  %t2 = bitcast <1 x i64> %b to <2 x i32>
  %t3 = xor <2 x i32> %t1, %t2
  ret <2 x i32> %t3
}
; Verify that 'xor' of vector and constant is done as a vector bitwise op before the bitcast.

define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
; CHECK-LABEL: @xor_bitcast_vec_to_vec(
; CHECK-NEXT:    [[T21:%.*]] = xor <1 x i64> %a, <i64 8589934593>
; CHECK-NEXT:    [[T2:%.*]] = bitcast <1 x i64> [[T21]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[T2]]
;
  %t1 = bitcast <1 x i64> %a to <2 x i32>
  %t2 = xor <2 x i32> <i32 1, i32 2>, %t1
  ret <2 x i32> %t2
}
; Verify that 'and' of integer and constant is done as a vector bitwise op before the bitcast.

define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
; CHECK-LABEL: @and_bitcast_vec_to_int(
; CHECK-NEXT:    [[T21:%.*]] = and <2 x i32> %a, <i32 3, i32 0>
; CHECK-NEXT:    [[T2:%.*]] = bitcast <2 x i32> [[T21]] to i64
; CHECK-NEXT:    ret i64 [[T2]]
;
  %t1 = bitcast <2 x i32> %a to i64
  %t2 = and i64 %t1, 3
  ret i64 %t2
}
; Verify that 'or' of vector and constant is done as an integer bitwise op before the bitcast.

define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
; CHECK-LABEL: @or_bitcast_int_to_vec(
; CHECK-NEXT:    [[T21:%.*]] = or i64 %a, 8589934593
; CHECK-NEXT:    [[T2:%.*]] = bitcast i64 [[T21]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[T2]]
;
  %t1 = bitcast i64 %a to <2 x i32>
  %t2 = or <2 x i32> %t1, <i32 1, i32 2>
  ret <2 x i32> %t2
}
; Optimize bitcasts that are extracting low element of vector.  This happens because of SRoA.
; rdar://7892780

define float @test2(<2 x float> %A, <2 x i32> %B) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <2 x float> %A, i32 0
; CHECK-NEXT:    [[BC:%.*]] = bitcast <2 x i32> %B to <2 x float>
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x float> [[BC]], i32 0
; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP24]], [[TMP4]]
; CHECK-NEXT:    ret float [[ADD]]
;
  %tmp28 = bitcast <2 x float> %A to i64  ; <i64> [#uses=2]
  %tmp23 = trunc i64 %tmp28 to i32                ; <i32> [#uses=1]
  %tmp24 = bitcast i32 %tmp23 to float            ; <float> [#uses=1]

  %tmp = bitcast <2 x i32> %B to i64
  %tmp2 = trunc i64 %tmp to i32                ; <i32> [#uses=1]
  %tmp4 = bitcast i32 %tmp2 to float            ; <float> [#uses=1]

  %add = fadd float %tmp24, %tmp4
  ret float %add
}
; Optimize bitcasts that are extracting other elements of a vector.  This happens because of SRoA.
; rdar://7892780

define float @test3(<2 x float> %A, <2 x i64> %B) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <2 x float> %A, i32 1
; CHECK-NEXT:    [[BC2:%.*]] = bitcast <2 x i64> %B to <4 x float>
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x float> [[BC2]], i32 2
; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP24]], [[TMP4]]
; CHECK-NEXT:    ret float [[ADD]]
;
  %tmp28 = bitcast <2 x float> %A to i64
  %tmp29 = lshr i64 %tmp28, 32
  %tmp23 = trunc i64 %tmp29 to i32
  %tmp24 = bitcast i32 %tmp23 to float

  %tmp = bitcast <2 x i64> %B to i128
  %tmp1 = lshr i128 %tmp, 64
  %tmp2 = trunc i128 %tmp1 to i32
  %tmp4 = bitcast i32 %tmp2 to float

  %add = fadd float %tmp24, %tmp4
  ret float %add
}
; Both bitcasts are unnecessary; change the extractelement.

define float @bitcast_extelt1(<2 x float> %A) {
; CHECK-LABEL: @bitcast_extelt1(
; CHECK-NEXT:    [[BC2:%.*]] = extractelement <2 x float> %A, i32 0
; CHECK-NEXT:    ret float [[BC2]]
;
  %bc1 = bitcast <2 x float> %A to <2 x i32>
  %ext = extractelement <2 x i32> %bc1, i32 0
  %bc2 = bitcast i32 %ext to float
  ret float %bc2
}
; Second bitcast can be folded into the first.

define i64 @bitcast_extelt2(<4 x float> %A) {
; CHECK-LABEL: @bitcast_extelt2(
; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float> %A to <2 x i64>
; CHECK-NEXT:    [[BC2:%.*]] = extractelement <2 x i64> [[BC]], i32 1
; CHECK-NEXT:    ret i64 [[BC2]]
;
  %bc1 = bitcast <4 x float> %A to <2 x double>
  %ext = extractelement <2 x double> %bc1, i32 1
  %bc2 = bitcast double %ext to i64
  ret i64 %bc2
}
; TODO: This should return %A.

define <2 x i32> @bitcast_extelt3(<2 x i32> %A) {
; CHECK-LABEL: @bitcast_extelt3(
; CHECK-NEXT:    [[BC1:%.*]] = bitcast <2 x i32> %A to <1 x i64>
; CHECK-NEXT:    [[EXT:%.*]] = extractelement <1 x i64> [[BC1]], i32 0
; CHECK-NEXT:    [[BC2:%.*]] = bitcast i64 [[EXT]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[BC2]]
;
  %bc1 = bitcast <2 x i32> %A to <1 x i64>
  %ext = extractelement <1 x i64> %bc1, i32 0
  %bc2 = bitcast i64 %ext to <2 x i32>
  ret <2 x i32> %bc2
}
; Handle the case where the input is not a vector.

define double @bitcast_extelt4(i128 %A) {
; CHECK-LABEL: @bitcast_extelt4(
; CHECK-NEXT:    [[BC:%.*]] = bitcast i128 %A to <2 x double>
; CHECK-NEXT:    [[BC2:%.*]] = extractelement <2 x double> [[BC]], i32 0
; CHECK-NEXT:    ret double [[BC2]]
;
  %bc1 = bitcast i128 %A to <2 x i64>
  %ext = extractelement <2 x i64> %bc1, i32 0
  %bc2 = bitcast i64 %ext to double
  ret double %bc2
}
; Bitcast of an integer assembled from two zext/shl/or pieces becomes vector element insertion.
define <2 x i32> @test4(i32 %A, i32 %B){
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i32> undef, i32 %A, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i32> [[TMP1]], i32 %B, i32 1
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %tmp38 = zext i32 %A to i64
  %tmp32 = zext i32 %B to i64
  %tmp33 = shl i64 %tmp32, 32
  %ins35 = or i64 %tmp33, %tmp38
  %tmp43 = bitcast i64 %ins35 to <2 x i32>
  ret <2 x i32> %tmp43
}
; rdar://8360454

define <2 x float> @test5(float %A, float %B) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x float> undef, float %A, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x float> [[TMP1]], float %B, i32 1
; CHECK-NEXT:    ret <2 x float> [[TMP2]]
;
  %tmp37 = bitcast float %A to i32
  %tmp38 = zext i32 %tmp37 to i64
  %tmp31 = bitcast float %B to i32
  %tmp32 = zext i32 %tmp31 to i64
  %tmp33 = shl i64 %tmp32, 32
  %ins35 = or i64 %tmp33, %tmp38
  %tmp43 = bitcast i64 %ins35 to <2 x float>
  ret <2 x float> %tmp43
}
; Constant low half (0x42280000 = 42.0f) plus a shifted float becomes a constant-base insertelement.
define <2 x float> @test6(float %A){
; CHECK-LABEL: @test6(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x float> <float 4.200000e+01, float undef>, float %A, i32 1
; CHECK-NEXT:    ret <2 x float> [[TMP1]]
;
  %tmp23 = bitcast float %A to i32
  %tmp24 = zext i32 %tmp23 to i64
  %tmp25 = shl i64 %tmp24, 32
  %mask20 = or i64 %tmp25, 1109917696
  %tmp35 = bitcast i64 %mask20 to <2 x float>
  ret <2 x float> %tmp35
}
; 'and' with the inverse of an all-ones vector bitcast constant-folds to zero.
define i64 @ISPC0(i64 %in) {
; CHECK-LABEL: @ISPC0(
; CHECK-NEXT:    ret i64 0
;
  %out = and i64 %in, xor (i64 bitcast (<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1> to i64), i64 -1)
  ret i64 %out
}
; 'and' with a zero vector bitcast xor'd with zero constant-folds to zero.
define i64 @Vec2(i64 %in) {
; CHECK-LABEL: @Vec2(
; CHECK-NEXT:    ret i64 0
;
  %out = and i64 %in, xor (i64 bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 0> to i64), i64 0)
  ret i64 %out
}
; Round-tripping -1 through a float vector bitcast still folds the masked 'and' to zero.
define i64 @All11(i64 %in) {
; CHECK-LABEL: @All11(
; CHECK-NEXT:    ret i64 0
;
  %out = and i64 %in, xor (i64 bitcast (<2 x float> bitcast (i64 -1 to <2 x float>) to i64), i64 -1)
  ret i64 %out
}
; Same fold with a single-element float vector and i32 width.
define i32 @All111(i32 %in) {
; CHECK-LABEL: @All111(
; CHECK-NEXT:    ret i32 0
;
  %out = and i32 %in, xor (i32 bitcast (<1 x float> bitcast (i32 -1 to <1 x float>) to i32), i32 -1)
  ret i32 %out
}
; Bitcast of an insertelement into a <1 x i32> vector folds to a direct scalar bitcast.
define <2 x i16> @BitcastInsert(i32 %a) {
; CHECK-LABEL: @BitcastInsert(
; CHECK-NEXT:    [[R:%.*]] = bitcast i32 %a to <2 x i16>
; CHECK-NEXT:    ret <2 x i16> [[R]]
;
  %v = insertelement <1 x i32> undef, i32 %a, i32 0
  %r = bitcast <1 x i32> %v to <2 x i16>
  ret <2 x i16> %r
}
; PR17293

define <2 x i64> @test7(<2 x i8*>* %arg) nounwind {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <2 x i8*>* %arg to <2 x i64>*
; CHECK-NEXT:    [[LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[CAST]], align 16
; CHECK-NEXT:    ret <2 x i64> [[LOAD]]
;
  %cast = bitcast <2 x i8*>* %arg to <2 x i64>*
  %load = load <2 x i64>, <2 x i64>* %cast, align 16
  ret <2 x i64> %load
}
; Bitcast of a constant <8 x i1> vector folds to the constant byte 0xAB (-85).
define i8 @test8() {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    ret i8 -85
;
  %res = bitcast <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true> to i8
  ret i8 %res
}