[X86][SSE] Add selective commutation support for insertps (PR40340)
When we are inserting 1 "inline" element and zeroing 2 of the other elements, we can safely commute the insertps source inputs to improve memory folding.

Differential Revision: https://reviews.llvm.org/D56843

llvm-svn: 351807
parent cd26560e46
commit 180fcff5a7
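For context: the insertps control byte packs three fields -- bits [7:6] select the source lane (COUNT_S), bits [5:4] the destination lane (COUNT_D), and bits [3:0] a zero mask (ZMask). The rule this patch implements can be sketched as a standalone function; the sketch below is illustrative only (commuteInsertPSImm is a made-up name, and compiler builtins stand in for LLVM's countPopulation/findFirstSet helpers), not the in-tree code:

#include <cassert>
#include <cstdint>
#include <optional>

// Sketch of the commutation rule: the operands of insertps may be swapped
// when the insertion is "inline" (source lane == destination lane), that
// lane is not zeroed, and exactly two of the remaining lanes are zeroed --
// leaving a single pass-through lane that can be re-sourced from the other
// operand. Returns the rewritten control byte, or nullopt if the
// instruction is not commutable.
std::optional<uint8_t> commuteInsertPSImm(uint8_t Imm) {
  unsigned ZMask = Imm & 15;         // bits [3:0]: lanes forced to zero
  unsigned DstIdx = (Imm >> 4) & 3;  // bits [5:4]: destination lane
  unsigned SrcIdx = (Imm >> 6) & 3;  // bits [7:6]: source lane
  if (DstIdx != SrcIdx || (ZMask & (1u << DstIdx)) != 0 ||
      __builtin_popcount(ZMask) != 2)
    return std::nullopt;
  // The one lane that is neither inserted into nor zeroed.
  unsigned AltIdx = __builtin_ctz((ZMask | (1u << DstIdx)) ^ 15);
  assert(AltIdx < 4 && "Illegal insertion index");
  // After swapping the operands, insert the pass-through lane "inline"
  // instead; the zero mask is unchanged.
  return uint8_t((AltIdx << 6) | (AltIdx << 4) | ZMask);
}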
llvm/lib/Target/X86/X86InstrAVX512.td
@@ -752,6 +752,7 @@ defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
 
 // vinsertps - insert f32 to XMM
 let ExeDomain = SSEPackedSingle in {
+let isCommutable = 1 in
 def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
       (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
       "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
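The TableGen change is a one-line enabler: marking the register form isCommutable tells the target-independent machinery (e.g. the two-address pass via TargetInstrInfo::commuteInstruction) that it may ask to swap $src1 and $src2. X86InstrInfo::commuteInstructionImpl in the next hunk then vets each insertps and rewrites the control byte so the swap is semantically sound. The SSE/AVX multiclass in X86InstrSSE.td receives the same flag in a later hunk.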
llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1569,6 +1569,28 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                    OpIdx1, OpIdx2);
   }
+  case X86::INSERTPSrr:
+  case X86::VINSERTPSrr:
+  case X86::VINSERTPSZrr: {
+    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
+    unsigned ZMask = Imm & 15;
+    unsigned DstIdx = (Imm >> 4) & 3;
+    unsigned SrcIdx = (Imm >> 6) & 3;
+
+    // We can commute insertps if we zero 2 of the elements, the insertion is
+    // "inline" and we don't override the insertion with a zero.
+    if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
+        countPopulation(ZMask) == 2) {
+      unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
+      assert(AltIdx < 4 && "Illegal insertion index");
+      unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
+      auto &WorkingMI = cloneIfNew(MI);
+      WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
+      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+                                                     OpIdx1, OpIdx2);
+    }
+    return nullptr;
+  }
   case X86::MOVSDrr:
   case X86::MOVSSrr:
   case X86::VMOVSDrr:
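Worked example, using the immediate from the test case in the final hunk: Imm = 85 = 0b01010101 decodes to ZMask = 0b0101 (lanes 0 and 2 zeroed), DstIdx = 1, SrcIdx = 1. The insertion is inline, lane 1 is not zeroed, and exactly two lanes are zeroed, so the operands can commute: AltIdx = findFirstSet((0b0101 | 0b0010) ^ 0b1111) = findFirstSet(0b1000) = 3, giving AltImm = (3 << 6) | (3 << 4) | 5 = 245 (0b11110101). Either form computes { 0, B[1], 0, A[3] }; commuting only changes which operand plays which role.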
llvm/lib/Target/X86/X86InstrSSE.td
@@ -5651,6 +5651,7 @@ let Constraints = "$src1 = $dst" in
 // vector. The next one matches the intrinsic and could zero arbitrary elements
 // in the target vector.
 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
+  let isCommutable = 1 in
   def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2, u8imm:$src3),
       !if(Is2Addr,
llvm/test/CodeGen/X86/insertps-combine.ll
@@ -302,15 +302,12 @@ define float @extract_lane_insertps_6123(<4 x float> %a0, <4 x float> *%p1) {
 define <4 x float> @commute_load_insertps(<4 x float>, <4 x float>* nocapture readonly) {
 ; SSE-LABEL: commute_load_insertps:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps (%rdi), %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = zero,xmm0[1],zero,xmm1[3]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[1],zero,mem[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_load_insertps:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovaps (%rdi), %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[1],zero,xmm1[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[1],zero,mem[0]
 ; AVX-NEXT:    retq
   %3 = load <4 x float>, <4 x float>* %1
   %4 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %3, <4 x float> %0, i8 85)
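The test shows the payoff: with the operands commuted, the loaded value becomes the insertps source operand, so the load folds into the instruction's memory form and the separate (v)movaps disappears. The memory form reads a single f32 (presumably with the pointer adjusted by the selected element's offset during folding, which is why the asm comment prints mem[0] rather than a lane index).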