Change how the AVX-512 vbroadcastsd/vbroadcastss patterns interact with spilling. The new implementation takes a scalar register and generates the vector broadcast directly, without a COPY_TO_REGCLASS that first turns the value into a VR128 register. The issue was that, after such a copy, register allocation may spill the scalar value using 128-bit loads and stores, wasting cache bandwidth.
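
To make the effect concrete, here is a minimal sketch (function and callee names are illustrative; the "before" sequence is inferred from the description above, while the "after" sequence matches the autogenerated checks in the new tests below):

declare void @func_f32(float)

define <16 x float> @spill_then_broadcast(float %x) {
; Before this change, %a was forced into a VR128 class by COPY_TO_REGCLASS,
; so the register allocator spilled and reloaded all 128 bits, roughly:
;   vmovaps %xmm0, {{[0-9]+}}(%rsp)       # 16-byte spill
;   callq   func_f32
;   vmovaps {{[0-9]+}}(%rsp), %xmm0       # 16-byte reload
;   vbroadcastss %xmm0, %zmm0
; After this change, %a stays in a scalar register class, so only 4 bytes
; are spilled and the reload is folded into the broadcast:
;   vmovss  %xmm0, {{[0-9]+}}(%rsp)       # 4-byte spill
;   callq   func_f32
;   vbroadcastss {{[0-9]+}}(%rsp), %zmm0  # 4-byte folded reload
  %a = fadd float %x, %x
  call void @func_f32(float %a)
  %b = insertelement <16 x float> undef, float %a, i32 0
  %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
  ret <16 x float> %c
}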

Differential Revision: http://reviews.llvm.org/D19579

llvm-svn: 268190
Igor Breger 2016-05-01 08:40:00 +00:00
parent e430de8be6
commit 131008fbcb
6 changed files with 329 additions and 110 deletions


@@ -821,6 +821,36 @@ def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---
// broadcast with a scalar argument.
multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
                                   X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
  let isCodeGenOnly = 1 in {
    def r_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
               (ins SrcInfo.FRC:$src), OpcodeStr#"\t{$src, $dst|$dst, $src}",
               [(set DestInfo.RC:$dst, (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)))]>,
               Requires<[HasAVX512]>, T8PD, EVEX;
    let Constraints = "$src0 = $dst" in
    def rk_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
                (ins DestInfo.RC:$src0, DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
                OpcodeStr#"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
                [(set DestInfo.RC:$dst,
                      (vselect DestInfo.KRCWM:$mask,
                               (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
                               DestInfo.RC:$src0))]>,
                Requires<[HasAVX512]>, T8PD, EVEX, EVEX_K;
    def rkz_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
                 (ins DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
                 OpcodeStr#"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
                 [(set DestInfo.RC:$dst,
                       (vselect DestInfo.KRCWM:$mask,
                                (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
                                DestInfo.ImmAllZerosV))]>,
                 Requires<[HasAVX512]>, T8PD, EVEX, EVEX_KZ;
  } // let isCodeGenOnly = 1 in
}
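
Reading note (not part of the patch): because these definitions are isCodeGenOnly, the _s variants share the encoding and assembly string of the vector-source forms; only the source register class (FR32X/FR64X instead of VR128X) differs, which is what lets the allocator spill the value as a scalar. A sketch of IR that should now select the merge-masked scalar-source form directly, mirroring the 256-bit tests added at the end of this commit (names are illustrative):

define <8 x double> @masked_scalar_broadcast(<8 x double> %i, double %a, <8 x i64> %mask1) {
; expected with -mattr=+avx512f, roughly:
;   vpcmpneqq {{.*}}, %k1
;   vbroadcastsd %xmm1, %zmm0 {%k1}
  %mask = icmp ne <8 x i64> %mask1, zeroinitializer
  %b = insertelement <8 x double> undef, double %a, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
  %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
  ret <8 x double> %r
}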
multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
                               X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
@@ -840,10 +870,12 @@ multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
multiclass avx512_fp_broadcast_vl<bits<8> opc, string OpcodeStr,
                                  AVX512VLVectorVTInfo _> {
  defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
           avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
           EVEX_V512;
  let Predicates = [HasVLX] in {
    defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
                avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
                EVEX_V256;
  }
}
@@ -852,8 +884,10 @@ let ExeDomain = SSEPackedSingle in {
  defm VBROADCASTSS : avx512_fp_broadcast_vl<0x18, "vbroadcastss",
                                             avx512vl_f32_info>;
  let Predicates = [HasVLX] in {
    defm VBROADCASTSSZ128 : avx512_broadcast_rm<0x18, "vbroadcastss",
                                                v4f32x_info, v4f32x_info>, EVEX_V128;
    defm VBROADCASTSSZ128 :
      avx512_broadcast_rm<0x18, "vbroadcastss", v4f32x_info, v4f32x_info>,
      avx512_broadcast_scalar<0x18, "vbroadcastss", v4f32x_info, v4f32x_info>,
      EVEX_V128;
  }
}
@@ -862,50 +896,6 @@ let ExeDomain = SSEPackedDouble in {
avx512vl_f64_info>, VEX_W;
}
// avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
// Later, we can canonize broadcast instructions before ISel phase and
// eliminate additional patterns on ISel.
// SrcRC_v and SrcRC_s are RegisterClasses for vector and scalar
// representations of source
multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
                                X86VectorVTInfo _, RegisterClass SrcRC_v,
                                RegisterClass SrcRC_s> {
  def : Pat<(_.VT (OpNode (_.EltVT SrcRC_s:$src))),
            (!cast<Instruction>(InstName##"r")
              (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
  let AddedComplexity = 30 in {
    def : Pat<(_.VT (vselect _.KRCWM:$mask,
                             (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
              (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
                (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
    def : Pat<(_.VT (vselect _.KRCWM:$mask,
                             (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
              (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
                (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
  }
}
defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
                            VR128X, FR32X>;
defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
                            VR128X, FR64X>;

let Predicates = [HasVLX] in {
  defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
                              v8f32x_info, VR128X, FR32X>;
  defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
                              v4f32x_info, VR128X, FR32X>;
  defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
                              v4f64x_info, VR128X, FR64X>;
}

def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSZm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDZm addr:$src)>;

def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
          (VBROADCASTSSZm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
@@ -1091,14 +1081,6 @@ def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
          (VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
          (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
          (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---


@@ -805,50 +805,54 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::TZMSK64rr, X86::TZMSK64rm, 0 },
// AVX-512 foldable instructions
{ X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
{ X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
{ X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
{ X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
{ X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
{ X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
{ X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
{ X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
{ X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
{ X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
{ X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
{ X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
{ X86::VPABSDZrr, X86::VPABSDZrm, 0 },
{ X86::VPABSQZrr, X86::VPABSQZrm, 0 },
{ X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
{ X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
{ X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
{ X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
{ X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
{ X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
{ X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
{ X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
{ X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
{ X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
{ X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
{ X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
{ X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
{ X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
{ X86::VPABSDZrr, X86::VPABSDZrm, 0 },
{ X86::VPABSQZrr, X86::VPABSQZrm, 0 },
{ X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
{ X86::VBROADCASTSSZr_s, X86::VBROADCASTSSZm, TB_NO_REVERSE },
{ X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
{ X86::VBROADCASTSDZr_s, X86::VBROADCASTSDZm, TB_NO_REVERSE },
// AVX-512 foldable instructions (256-bit versions)
{ X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
{ X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
{ X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
{ X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
{ X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
{ X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
{ X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
{ X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
{ X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
{ X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
{ X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
{ X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
// AVX-512 foldable instructions (256-bit versions)
{ X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
{ X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
{ X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
{ X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
{ X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
{ X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
{ X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
{ X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
{ X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
{ X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
{ X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
{ X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
{ X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
{ X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
{ X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
{ X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
{ X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
{ X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
{ X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
{ X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
{ X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
{ X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
{ X86::VBROADCASTSSZ256r_s, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
{ X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
{ X86::VBROADCASTSDZ256r_s, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
// AVX-512 foldable instructions (128-bit versions)
{ X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
{ X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
{ X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
{ X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
{ X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
{ X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
{ X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
{ X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
{ X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
{ X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
{ X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
{ X86::VBROADCASTSSZ128r_s, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
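Reading note (not part of the patch): these table entries are what let the spiller rewrite the register-form broadcast into its load form, producing the "folded reload" seen in the new tests. TB_NO_REVERSE marks the mapping as fold-only: the memory form cannot be unfolded back to the register form, since that would turn the 4- or 8-byte broadcast load into a full-width register load. A minimal IR reproducer for the 128-bit entry, assuming -mattr=+avx512f,+avx512vl (the callee name is illustrative):

declare void @use_f32(float)

define <4 x float> @fold_reload_128(float %x) {
; expected: the 4-byte reload of %a folds into the broadcast, roughly
;   vmovss %xmm0, {{[0-9]+}}(%rsp)        # 4-byte spill
;   callq  use_f32
;   vbroadcastss {{[0-9]+}}(%rsp), %xmm0  # 4-byte folded reload
  %a = fadd float %x, %x
  call void @use_f32(float %a)
  %b = insertelement <4 x float> undef, float %a, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %c
}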
// F16C foldable instructions
{ X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
{ X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },


@@ -8396,17 +8396,19 @@ let Predicates = [HasAVX2] in {
def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
          (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
                                   sub_xmm)))>;
}

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
let AddedComplexity = 20 in {
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
            (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
            (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
}

let Predicates = [HasAVX2, NoVLX] in {
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
  }
}
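
Reading note (not part of the patch): with VLX available, the EVEX patterns added in the AVX-512 file above take precedence, which is why these VEX fallbacks are now guarded by NoVLX. A small IR example that should hit the fallback under plain AVX2 (the function name is illustrative):

define <8 x float> @avx2_reg_broadcast(float %x) {
; expected with -mattr=+avx2 (no VLX), roughly:
;   vaddss %xmm0, %xmm0, %xmm0
;   vbroadcastss %xmm0, %ymm0
  %a = fadd float %x, %x                ; computed value, not a load
  %b = insertelement <8 x float> undef, float %a, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
  ret <8 x float> %c
}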
let Predicates = [HasAVX2, NoVLX_Or_NoBWI], AddedComplexity = 20 in {
@@ -8458,6 +8460,11 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
// is used by additional users, which prevents the pattern selection.
let Predicates = [HasAVX], AddedComplexity = 20 in {
  // 128bit broadcasts:
  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
}
let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
@@ -8469,11 +8476,6 @@ let Predicates = [HasAVX], AddedComplexity = 20 in {
             (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
             (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;
  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
}

let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),


@@ -663,3 +663,20 @@ define <32 x i8> @test_cmpgtb(<32 x i8> %A) {
ret <32 x i8> %B
}
define <4 x float> @_inreg4xfloat(float %a) {
%b = insertelement <4 x float> undef, float %a, i32 0
%c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %c
}
define <8 x float> @_inreg8xfloat(float %a) {
%b = insertelement <8 x float> undef, float %a, i32 0
%c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %c
}
define <4 x double> @_inreg4xdouble(double %a) {
%b = insertelement <4 x double> undef, double %a, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %c
}


@@ -398,3 +398,42 @@ define <8 x i64> @_invec4xi64(<4 x i64>%a) {
ret <8 x i64>%res
}
declare void @func_f32(float)
define <16 x float> @broadcast_ss_spill(float %x) {
; ALL-LABEL: broadcast_ss_spill:
; ALL: # BB#0:
; ALL-NEXT: pushq %rax
; ALL-NEXT: .Ltmp0:
; ALL-NEXT: .cfi_def_cfa_offset 16
; ALL-NEXT: vaddss %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Folded Spill
; ALL-NEXT: callq func_f32
; ALL-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %zmm0 # 4-byte Folded Reload
; ALL-NEXT: popq %rax
; ALL-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
ret <16 x float> %c
}
declare void @func_f64(double)
define <8 x double> @broadcast_sd_spill(double %x) {
; ALL-LABEL: broadcast_sd_spill:
; ALL: # BB#0:
; ALL-NEXT: pushq %rax
; ALL-NEXT: .Ltmp1:
; ALL-NEXT: .cfi_def_cfa_offset 16
; ALL-NEXT: vaddsd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Folded Spill
; ALL-NEXT: callq func_f64
; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 8-byte Folded Reload
; ALL-NEXT: popq %rax
; ALL-NEXT: retq
%a = fadd double %x, %x
call void @func_f64(double %a)
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
ret <8 x double> %c
}


@@ -0,0 +1,175 @@
; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f -mattr=+avx512vl| FileCheck %s
declare void @func_f32(float)
define <8 x float> @_256_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _256_broadcast_ss_spill:
; CHECK: # BB#0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Folded Spill
; CHECK-NEXT: callq func_f32
; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %ymm0 # 4-byte Folded Reload
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
%b = insertelement <8 x float> undef, float %a, i32 0
%c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %c
}
define <4 x float> @_128_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _128_broadcast_ss_spill:
; CHECK: # BB#0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .Ltmp1:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Folded Spill
; CHECK-NEXT: callq func_f32
; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
%b = insertelement <4 x float> undef, float %a, i32 0
%c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %c
}
declare void @func_f64(double)
define <4 x double> @_256_broadcast_sd_spill(double %x) {
; CHECK-LABEL: _256_broadcast_sd_spill:
; CHECK: # BB#0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .Ltmp2:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vaddsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Folded Spill
; CHECK-NEXT: callq func_f64
; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 8-byte Folded Reload
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
%a = fadd double %x, %x
call void @func_f64(double %a)
%b = insertelement <4 x double> undef, double %a, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %c
}
define <8 x float> @_inreg8xfloat(float %a) {
; CHECK-LABEL: _inreg8xfloat:
; CHECK: # BB#0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: retq
%b = insertelement <8 x float> undef, float %a, i32 0
%c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %c
}
define <8 x float> @_ss8xfloat_mask(<8 x float> %i, float %a, <8 x i32> %mask1) {
; CHECK-LABEL: _ss8xfloat_mask:
; CHECK: # BB#0:
; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3
; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1
; CHECK-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x float> undef, float %a, i32 0
%c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
%r = select <8 x i1> %mask, <8 x float> %c, <8 x float> %i
ret <8 x float> %r
}
define <8 x float> @_ss8xfloat_maskz(float %a, <8 x i32> %mask1) {
; CHECK-LABEL: _ss8xfloat_maskz:
; CHECK: # BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x float> undef, float %a, i32 0
%c = shufflevector <8 x float> %b, <8 x float> undef, <8 x i32> zeroinitializer
%r = select <8 x i1> %mask, <8 x float> %c, <8 x float> zeroinitializer
ret <8 x float> %r
}
define <4 x float> @_inreg4xfloat(float %a) {
; CHECK-LABEL: _inreg4xfloat:
; CHECK: # BB#0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: retq
%b = insertelement <4 x float> undef, float %a, i32 0
%c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %c
}
define <4 x float> @_ss4xfloat_mask(<4 x float> %i, float %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xfloat_mask:
; CHECK: # BB#0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastss %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%b = insertelement <4 x float> undef, float %a, i32 0
%c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
%r = select <4 x i1> %mask, <4 x float> %c, <4 x float> %i
ret <4 x float> %r
}
define <4 x float> @_ss4xfloat_maskz(float %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xfloat_maskz:
; CHECK: # BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%b = insertelement <4 x float> undef, float %a, i32 0
%c = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
%r = select <4 x i1> %mask, <4 x float> %c, <4 x float> zeroinitializer
ret <4 x float> %r
}
define <4 x double> @_inreg4xdouble(double %a) {
; CHECK-LABEL: _inreg4xdouble:
; CHECK: # BB#0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: retq
%b = insertelement <4 x double> undef, double %a, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %c
}
define <4 x double> @_ss4xdouble_mask(<4 x double> %i, double %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xdouble_mask:
; CHECK: # BB#0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1
; CHECK-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%b = insertelement <4 x double> undef, double %a, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
%r = select <4 x i1> %mask, <4 x double> %c, <4 x double> %i
ret <4 x double> %r
}
define <4 x double> @_ss4xdouble_maskz(double %a, <4 x i32> %mask1) {
; CHECK-LABEL: _ss4xdouble_maskz:
; CHECK: # BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%b = insertelement <4 x double> undef, double %a, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
%r = select <4 x i1> %mask, <4 x double> %c, <4 x double> zeroinitializer
ret <4 x double> %r
}