AMDGPU/GlobalISel: Prefer merge/unmerge ops to legalize TFE

These have a better chance of combining with other operations and are
currently much better supported than G_EXTRACT.
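
For example, with TFE enabled the f32 image load intrinsic produces a <2 x s32> result (data plus status), and the two pieces are now separated with a single G_UNMERGE_VALUES rather than two offset G_EXTRACTs. A minimal sketch of the pattern change, using placeholder register names and mirroring the updated tests below:

  Before:
    %data:_(s32) = G_EXTRACT %tfe_result(<2 x s32>), 0
    %status:_(s32) = G_EXTRACT %tfe_result(<2 x s32>), 32
  After:
    %data:_(s32), %status:_(s32) = G_UNMERGE_VALUES %tfe_result(<2 x s32>)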
Matt Arsenault, 2020-01-26 12:50:24 -05:00 (committed by Matt Arsenault)
parent a82a28ae12
commit 7bffa97285
3 changed files with 171 additions and 76 deletions


@@ -2914,6 +2914,52 @@ bool AMDGPULegalizerInfo::legalizeBufferAtomic(MachineInstr &MI,
   return true;
 }
 
+// Produce a vector of s16 elements from s32 pieces.
+static void truncToS16Vector(MachineIRBuilder &B, Register DstReg,
+                             ArrayRef<Register> UnmergeParts) {
+  const LLT S16 = LLT::scalar(16);
+
+  SmallVector<Register, 4> RemergeParts(UnmergeParts.size());
+  for (int I = 0, E = UnmergeParts.size(); I != E; ++I)
+    RemergeParts[I] = B.buildTrunc(S16, UnmergeParts[I]).getReg(0);
+
+  B.buildBuildVector(DstReg, RemergeParts);
+}
+
+/// Convert a set of s32 registers to a result vector with s16 elements.
+static void bitcastToS16Vector(MachineIRBuilder &B, Register DstReg,
+                               ArrayRef<Register> UnmergeParts) {
+  MachineRegisterInfo &MRI = *B.getMRI();
+  const LLT V2S16 = LLT::vector(2, 16);
+  LLT TargetTy = MRI.getType(DstReg);
+  int NumElts = UnmergeParts.size();
+
+  if (NumElts == 1) {
+    assert(TargetTy == V2S16);
+    B.buildBitcast(DstReg, UnmergeParts[0]);
+    return;
+  }
+
+  SmallVector<Register, 4> RemergeParts(NumElts);
+  for (int I = 0; I != NumElts; ++I)
+    RemergeParts[I] = B.buildBitcast(V2S16, UnmergeParts[I]).getReg(0);
+
+  if (TargetTy.getSizeInBits() == 32u * NumElts) {
+    B.buildConcatVectors(DstReg, RemergeParts);
+    return;
+  }
+
+  const LLT V3S16 = LLT::vector(3, 16);
+  const LLT V6S16 = LLT::vector(6, 16);
+
+  // Widen to v6s16 and unpack v3 parts.
+  assert(TargetTy == V3S16);
+
+  RemergeParts.push_back(B.buildUndef(V2S16).getReg(0));
+  auto Concat = B.buildConcatVectors(V6S16, RemergeParts);
+  B.buildUnmerge({DstReg, MRI.createGenericVirtualRegister(V3S16)}, Concat);
+}
+
 // FIXME: Just vector trunc should be sufficient, but legalization currently
 // broken.
 static void repackUnpackedD16Load(MachineIRBuilder &B, Register DstReg,
@@ -2973,6 +3019,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   Register DstReg = MI.getOperand(0).getReg();
   LLT Ty = MRI->getType(DstReg);
+  const LLT EltTy = Ty.getScalarType();
   const bool IsD16 = Ty.getScalarType() == S16;
   const unsigned NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
@@ -3015,18 +3062,38 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
     // Insert after the instruction.
     B.setInsertPt(*MI.getParent(), ++MI.getIterator());
 
-    // TODO: Should probably unmerge to s32 pieces and repack instead of using
-    // extracts.
-    if (RoundedTy == Ty) {
-      B.buildExtract(DstReg, TFEReg, 0);
-    } else {
-      // If we had to round the data type (i.e. this was a <3 x s16>), do the
-      // weird extract separately.
-      auto DataPart = B.buildExtract(RoundedTy, TFEReg, 0);
-      B.buildExtract(DstReg, DataPart, 0);
-    }
+    // Now figure out how to copy the new result register back into the old
+    // result.
+    SmallVector<Register, 5> UnmergeResults(TFETy.getNumElements(), Dst1Reg);
+
+    int NumDataElts = TFETy.getNumElements() - 1;
+
+    if (!Ty.isVector()) {
+      // Simplest case is a trivial unmerge (plus a truncate for d16).
+      UnmergeResults[0] = Ty == S32 ?
+        DstReg : MRI->createGenericVirtualRegister(S32);
+
+      B.buildUnmerge(UnmergeResults, TFEReg);
+      if (Ty != S32)
+        B.buildTrunc(DstReg, UnmergeResults[0]);
+      return true;
+    }
 
-    B.buildExtract(Dst1Reg, TFEReg, RoundedTy.getSizeInBits());
+    // We have to repack into a new vector of some kind.
+    for (int I = 0; I != NumDataElts; ++I)
+      UnmergeResults[I] = MRI->createGenericVirtualRegister(S32);
+    B.buildUnmerge(UnmergeResults, TFEReg);
+
+    // Drop the final TFE element.
+    ArrayRef<Register> DataPart(UnmergeResults.data(), NumDataElts);
+
+    if (EltTy == S32)
+      B.buildBuildVector(DstReg, DataPart);
+    else if (ST.hasUnpackedD16VMem())
+      truncToS16Vector(B, DstReg, DataPart);
+    else
+      bitcastToS16Vector(B, DstReg, DataPart);
 
     return true;
   }


@@ -235,10 +235,9 @@ define amdgpu_ps half @image_load_tfe_f16(<8 x i32> inreg %rsrc, i32 %s, i32 %t)
; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; UNPACKED: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 1, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 2 from custom "TargetCustom8")
; UNPACKED: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 0
; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
; UNPACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
; UNPACKED: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; UNPACKED: $vgpr0 = COPY [[COPY10]](s32)
; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
; PACKED-LABEL: name: image_load_tfe_f16
@@ -257,11 +256,9 @@ define amdgpu_ps half @image_load_tfe_f16(<8 x i32> inreg %rsrc, i32 %s, i32 %t)
; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; PACKED: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 1, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 2 from custom "TargetCustom8")
; PACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INT]](<2 x s32>), 0
; PACKED: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[EXTRACT]](<2 x s16>)
; PACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
; PACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
; PACKED: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; PACKED: $vgpr0 = COPY [[COPY10]](s32)
; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
%res = call { half, i32 } @llvm.amdgcn.image.load.2d.sl_f16i32s.i32(i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
@@ -288,11 +285,18 @@ define amdgpu_ps <2 x half> @image_load_tfe_v2f16(<8 x i32> inreg %rsrc, i32 %s,
; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; UNPACKED: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 3, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[INT]](<3 x s32>), 0
; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[EXTRACT]](<2 x s32>), 0
; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
; UNPACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: $vgpr0 = COPY [[EXTRACT1]](<2 x s16>)
; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
; UNPACKED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; UNPACKED: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C]]
; UNPACKED: [[COPY11:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; UNPACKED: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C]]
; UNPACKED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNPACKED: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; UNPACKED: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; UNPACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; UNPACKED: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
; PACKED-LABEL: name: image_load_tfe_v2f16
; PACKED: bb.1 (%ir-block.0):
@@ -310,10 +314,10 @@ define amdgpu_ps <2 x half> @image_load_tfe_v2f16(<8 x i32> inreg %rsrc, i32 %s,
; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; PACKED: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 3, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
; PACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INT]](<2 x s32>), 0
; PACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
; PACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: $vgpr0 = COPY [[EXTRACT]](<2 x s16>)
; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
; PACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV]](s32)
; PACKED: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
%res = call { <2 x half>, i32 } @llvm.amdgcn.image.load.2d.sl_v2f16i32s.i32(i32 3, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
%tex = extractvalue { <2 x half>, i32 } %res, 0
@@ -339,16 +343,31 @@ define amdgpu_ps <3 x half> @image_load_tfe_v3f16(<8 x i32> inreg %rsrc, i32 %s,
; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; UNPACKED: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 7, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 6 from custom "TargetCustom8", align 8)
; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<3 x s32>) = G_EXTRACT [[INT]](<4 x s32>), 0
; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[EXTRACT]](<3 x s32>), 0
; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<4 x s32>), 96
; UNPACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
; UNPACKED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; UNPACKED: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C]]
; UNPACKED: [[COPY11:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; UNPACKED: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C]]
; UNPACKED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNPACKED: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; UNPACKED: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; UNPACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; UNPACKED: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; UNPACKED: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C]]
; UNPACKED: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; UNPACKED: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[C2]], [[C1]](s32)
; UNPACKED: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
; UNPACKED: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
; UNPACKED: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>)
; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
; UNPACKED: G_STORE [[UV3]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; UNPACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT1]](<3 x s16>), 0
; UNPACKED: [[EXTRACT3:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
; UNPACKED: [[EXTRACT4:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
; UNPACKED: $vgpr0 = COPY [[EXTRACT3]](<2 x s16>)
; UNPACKED: $vgpr1 = COPY [[EXTRACT4]](<2 x s16>)
; UNPACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
; UNPACKED: $vgpr0 = COPY [[EXTRACT1]](<2 x s16>)
; UNPACKED: $vgpr1 = COPY [[EXTRACT2]](<2 x s16>)
; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
; PACKED-LABEL: name: image_load_tfe_v3f16
; PACKED: bb.1 (%ir-block.0):
@@ -366,16 +385,19 @@ define amdgpu_ps <3 x half> @image_load_tfe_v3f16(<8 x i32> inreg %rsrc, i32 %s,
; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; PACKED: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 7, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 6 from custom "TargetCustom8", align 8)
; PACKED: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[INT]](<3 x s32>), 0
; PACKED: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[EXTRACT]](<4 x s16>), 0
; PACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
; PACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; PACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT1]](<3 x s16>), 0
; PACKED: [[EXTRACT3:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
; PACKED: [[EXTRACT4:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
; PACKED: $vgpr0 = COPY [[EXTRACT3]](<2 x s16>)
; PACKED: $vgpr1 = COPY [[EXTRACT4]](<2 x s16>)
; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
; PACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV]](s32)
; PACKED: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV1]](s32)
; PACKED: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
; PACKED: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[DEF1]](<2 x s16>)
; PACKED: [[UV3:%[0-9]+]]:_(<3 x s16>), [[UV4:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<6 x s16>)
; PACKED: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; PACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[UV3]](<3 x s16>), 0
; PACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
; PACKED: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
; PACKED: $vgpr0 = COPY [[EXTRACT]](<2 x s16>)
; PACKED: $vgpr1 = COPY [[EXTRACT1]](<2 x s16>)
; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%res = call { <3 x half>, i32 } @llvm.amdgcn.image.load.2d.sl_v3f16i32s.i32(i32 7, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
%tex = extractvalue { <3 x half>, i32 } %res, 0
@@ -401,13 +423,26 @@ define amdgpu_ps <4 x half> @image_load_tfe_v4f16(<8 x i32> inreg %rsrc, i32 %s,
; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; UNPACKED: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[INT]](<5 x s32>), 0
; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[EXTRACT]](<4 x s32>), 0
; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<5 x s32>), 128
; UNPACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s16>)
; UNPACKED: $vgpr0 = COPY [[UV]](<2 x s16>)
; UNPACKED: $vgpr1 = COPY [[UV1]](<2 x s16>)
; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
; UNPACKED: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; UNPACKED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; UNPACKED: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C]]
; UNPACKED: [[COPY11:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; UNPACKED: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C]]
; UNPACKED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; UNPACKED: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; UNPACKED: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; UNPACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; UNPACKED: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
; UNPACKED: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C]]
; UNPACKED: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
; UNPACKED: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C]]
; UNPACKED: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; UNPACKED: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]]
; UNPACKED: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
; UNPACKED: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
; UNPACKED: $vgpr1 = COPY [[BITCAST1]](<2 x s16>)
; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
; PACKED-LABEL: name: image_load_tfe_v4f16
; PACKED: bb.1 (%ir-block.0):
@@ -425,12 +460,12 @@ define amdgpu_ps <4 x half> @image_load_tfe_v4f16(<8 x i32> inreg %rsrc, i32 %s,
; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; PACKED: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
; PACKED: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[INT]](<3 x s32>), 0
; PACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
; PACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s16>)
; PACKED: $vgpr0 = COPY [[UV]](<2 x s16>)
; PACKED: $vgpr1 = COPY [[UV1]](<2 x s16>)
; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
; PACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV]](s32)
; PACKED: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV1]](s32)
; PACKED: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; PACKED: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
; PACKED: $vgpr1 = COPY [[BITCAST1]](<2 x s16>)
; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%res = call { <4 x half>, i32 } @llvm.amdgcn.image.load.2d.sl_v4f16i32s.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
%tex = extractvalue { <4 x half>, i32 } %res, 0


@@ -115,10 +115,9 @@ define amdgpu_ps float @image_load_tfe_f32(<8 x i32> inreg %rsrc, i32 %s, i32 %t
; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; GCN: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 1, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 4 from custom "TargetCustom8")
; GCN: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 0
; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: $vgpr0 = COPY [[EXTRACT]](s32)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
; GCN: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: $vgpr0 = COPY [[UV]](s32)
; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
%res = call { float, i32 } @llvm.amdgcn.image.load.2d.sl_f32i32s.i32(i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
%tex = extractvalue { float, i32 } %res, 0
@@ -144,10 +143,8 @@ define amdgpu_ps <2 x float> @image_load_tfe_v2f32(<8 x i32> inreg %rsrc, i32 %s
; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; GCN: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 3, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
; GCN: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[INT]](<3 x s32>), 0
; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<2 x s32>)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
; GCN: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: $vgpr0 = COPY [[UV]](s32)
; GCN: $vgpr1 = COPY [[UV1]](s32)
; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
@@ -175,10 +172,8 @@ define amdgpu_ps <3 x float> @image_load_tfe_v3f32(<8 x i32> inreg %rsrc, i32 %s
; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; GCN: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 7, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 12 from custom "TargetCustom8", align 16)
; GCN: [[EXTRACT:%[0-9]+]]:_(<3 x s32>) = G_EXTRACT [[INT]](<4 x s32>), 0
; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<4 x s32>), 96
; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<3 x s32>)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
; GCN: G_STORE [[UV3]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: $vgpr0 = COPY [[UV]](s32)
; GCN: $vgpr1 = COPY [[UV1]](s32)
; GCN: $vgpr2 = COPY [[UV2]](s32)
@@ -207,10 +202,8 @@ define amdgpu_ps <4 x float> @image_load_tfe_v4f32(<8 x i32> inreg %rsrc, i32 %s
; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; GCN: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
; GCN: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[INT]](<5 x s32>), 0
; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<5 x s32>), 128
; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
; GCN: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
; GCN: $vgpr0 = COPY [[UV]](s32)
; GCN: $vgpr1 = COPY [[UV1]](s32)
; GCN: $vgpr2 = COPY [[UV2]](s32)