AMDGPU/GlobalISel: Fix asserting on gather4 intrinsics

commit 039c917b43 (parent a060a1782e)
@@ -3376,30 +3376,36 @@ bool AMDGPULegalizerInfo::legalizeBufferAtomic(MachineInstr &MI,
 /// Turn a set of s16 typed registers in \p A16AddrRegs into a dword sized
 /// vector with s16 typed elements.
-static void packImageA16AddressToDwords(MachineIRBuilder &B,
-                                        MachineInstr &MI,
+static void packImageA16AddressToDwords(MachineIRBuilder &B, MachineInstr &MI,
                                         SmallVectorImpl<Register> &PackedAddrs,
-                                        int DimIdx,
-                                        int NumVAddrs) {
+                                        int AddrIdx, int DimIdx, int NumVAddrs,
+                                        int NumGradients) {
   const LLT S16 = LLT::scalar(16);
   const LLT V2S16 = LLT::vector(2, 16);
 
-  SmallVector<Register, 8> A16AddrRegs;
-  A16AddrRegs.resize(NumVAddrs);
+  for (int I = AddrIdx; I < AddrIdx + NumVAddrs; ++I) {
+    Register AddrReg = MI.getOperand(I).getReg();
 
-  for (int I = 0; I != NumVAddrs; ++I) {
-    A16AddrRegs[I] = MI.getOperand(DimIdx + I).getReg();
-    assert(B.getMRI()->getType(A16AddrRegs[I]) == S16);
-  }
-
-  // Round to dword.
-  if (NumVAddrs % 2 != 0)
-    A16AddrRegs.push_back(B.buildUndef(S16).getReg(0));
-
-  PackedAddrs.resize(A16AddrRegs.size() / 2);
-  for (int I = 0, E = PackedAddrs.size(); I != E; ++I) {
-    PackedAddrs[I] = B.buildBuildVector(
-      V2S16, {A16AddrRegs[2 * I], A16AddrRegs[2 * I + 1]}).getReg(0);
+    if (I < DimIdx) {
+      AddrReg = B.buildBitcast(V2S16, AddrReg).getReg(0);
+      PackedAddrs.push_back(AddrReg);
+    } else {
+      // Dz/dh, dz/dv and the last odd coord are packed with undef. Also, in 1D,
+      // derivatives dx/dh and dx/dv are packed with undef.
+      if (((I + 1) >= (AddrIdx + NumVAddrs)) ||
+          ((NumGradients / 2) % 2 == 1 &&
+           (I == DimIdx + (NumGradients / 2) - 1 ||
+            I == DimIdx + NumGradients - 1))) {
+        PackedAddrs.push_back(
+            B.buildBuildVector(V2S16, {AddrReg, B.buildUndef(S16).getReg(0)})
+                .getReg(0));
+      } else {
+        PackedAddrs.push_back(
+            B.buildBuildVector(V2S16, {AddrReg, MI.getOperand(I + 1).getReg()})
+                .getReg(0));
+        ++I;
+      }
+    }
   }
 }
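To make the new packing rule concrete, here is a minimal standalone sketch of the same pairing logic (illustration only, not LLVM code: strings stand in for registers, and packA16Addrs and the operand layout in main are hypothetical names invented for this example):

#include <iostream>
#include <string>
#include <vector>

// Model of the loop above: operands [AddrIdx, AddrIdx + NumVAddrs) are paired
// into dwords. Operands before DimIdx (offset/bias/zcompare) pass through,
// standing in for the v2s16 bitcast; the last odd coordinate and the dz/dh,
// dz/dv gradient pair get an UNDEF partner, mirroring the condition above.
static std::vector<std::string>
packA16Addrs(const std::vector<std::string> &Ops, int AddrIdx, int DimIdx,
             int NumVAddrs, int NumGradients) {
  std::vector<std::string> Packed;
  for (int I = AddrIdx; I < AddrIdx + NumVAddrs; ++I) {
    const std::string &Addr = Ops[I];
    if (I < DimIdx) {
      Packed.push_back(Addr); // stands in for the v2s16 bitcast
    } else if ((I + 1) >= (AddrIdx + NumVAddrs) ||
               ((NumGradients / 2) % 2 == 1 &&
                (I == DimIdx + (NumGradients / 2) - 1 ||
                 I == DimIdx + NumGradients - 1))) {
      Packed.push_back("{" + Addr + ", UNDEF}");
    } else {
      Packed.push_back("{" + Addr + ", " + Ops[I + 1] + "}");
      ++I; // consumed the partner operand
    }
  }
  return Packed;
}

int main() {
  // Hypothetical gather4-with-offset layout: one extra arg, then two coords.
  std::vector<std::string> Ops = {"offset", "s", "t"};
  for (const std::string &P :
       packA16Addrs(Ops, /*AddrIdx=*/0, /*DimIdx=*/1, /*NumVAddrs=*/3,
                    /*NumGradients=*/0))
    std::cout << P << '\n'; // prints: offset, then {s, t}
}

With these inputs the offset keeps its own dword and the two coordinates share one; operands before DimIdx are no longer assumed to be s16, which is how the rewritten loop avoids the assert the old code hit on the s32 offset operand of the gather4 .o variants.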
@@ -3419,15 +3425,18 @@ static void convertImageAddrToPacked(MachineIRBuilder &B, MachineInstr &MI,
     MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
 }
 
-static int getImageNumVAddr(const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
-                            const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode) {
+/// Return number of address arguments, and the number of gradients
+static std::pair<int, int>
+getImageNumVAddr(const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
+                 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode) {
   const AMDGPU::MIMGDimInfo *DimInfo
     = AMDGPU::getMIMGDimInfo(ImageDimIntr->Dim);
 
   int NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
   int NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
   int NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
-  return BaseOpcode->NumExtraArgs + NumGradients + NumCoords + NumLCM;
+  int NumVAddr = BaseOpcode->NumExtraArgs + NumGradients + NumCoords + NumLCM;
+  return {NumVAddr, NumGradients};
 }
 
 static int getDMaskIdx(const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode,
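As a hedged reading of the new return-value contract (the concrete counts below are inferred from the gather4_c_cl_o_2d test added in this commit, not quoted from the MIMG tables): for gather4.c.cl.o.2d, offset and zcompare are the two extra args, a 2D gather contributes two coordinates, and clamp fills the LOD/clamp/mip slot, so the helper would report five address arguments and no gradients.

#include <tuple>
#include <utility>

// Hypothetical mirror of getImageNumVAddr's new contract: the summed operand
// count plus the gradient count, returned together as a pair.
static std::pair<int, int> getNumVAddr(int NumExtraArgs, int NumGradients,
                                       int NumCoords, int NumLCM) {
  return {NumExtraArgs + NumGradients + NumCoords + NumLCM, NumGradients};
}

int main() {
  int NumVAddrs, NumGradients;
  // gather4.c.cl.o.2d: 2 extra args (offset, zcompare), 0 gradients,
  // 2 coords (s, t), 1 LOD/clamp/mip slot (clamp).
  std::tie(NumVAddrs, NumGradients) = getNumVAddr(2, 0, 2, 1);
  return (NumVAddrs == 5 && NumGradients == 0) ? 0 : 1; // 5 matches <5 x s32>
}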
@@ -3495,7 +3504,8 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   LLT AddrTy = MRI->getType(MI.getOperand(DimIdx).getReg());
   const bool IsA16 = AddrTy == S16;
 
-  const int NumVAddrs = getImageNumVAddr(ImageDimIntr, BaseOpcode);
+  int NumVAddrs, NumGradients;
+  std::tie(NumVAddrs, NumGradients) = getImageNumVAddr(ImageDimIntr, BaseOpcode);
 
   // If the register allocator cannot place the address registers contiguously
   // without introducing moves, then using the non-sequential address encoding
@@ -3522,7 +3532,8 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
 
     if (NumVAddrs > 1) {
       SmallVector<Register, 4> PackedRegs;
-      packImageA16AddressToDwords(B, MI, PackedRegs, DimIdx, NumVAddrs);
+      packImageA16AddressToDwords(B, MI, PackedRegs, AddrIdx, DimIdx, NumVAddrs,
+                                  NumGradients);
 
       if (!UseNSA && PackedRegs.size() > 1) {
         LLT PackedAddrTy = LLT::vector(2 * PackedRegs.size(), 16);
@@ -3533,16 +3544,16 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
 
       const int NumPacked = PackedRegs.size();
       for (int I = 0; I != NumVAddrs; ++I) {
-        assert(MI.getOperand(DimIdx + I).getReg() != AMDGPU::NoRegister);
+        assert(MI.getOperand(AddrIdx + I).getReg() != AMDGPU::NoRegister);
 
         if (I < NumPacked)
-          MI.getOperand(DimIdx + I).setReg(PackedRegs[I]);
+          MI.getOperand(AddrIdx + I).setReg(PackedRegs[I]);
         else
-          MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
+          MI.getOperand(AddrIdx + I).setReg(AMDGPU::NoRegister);
       }
     }
   } else if (!UseNSA && NumVAddrs > 1) {
-    convertImageAddrToPacked(B, MI, DimIdx, NumVAddrs);
+    convertImageAddrToPacked(B, MI, AddrIdx, NumVAddrs);
   }
 
   int DMaskLanes = 0;
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,782 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
|
||||
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
|
||||
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t) {
|
||||
; GFX6-LABEL: name: gather4_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.o.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t) {
|
||||
; GFX6-LABEL: name: gather4_c_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_c_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %clamp) {
|
||||
; GFX6-LABEL: name: gather4_cl_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_cl_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.cl.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %clamp) {
|
||||
; GFX6-LABEL: name: gather4_c_cl_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_c_cl_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_b_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %s, float %t) {
|
||||
; GFX6-LABEL: name: gather4_b_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_b_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.b.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_b_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %zcompare, float %s, float %t) {
|
||||
; GFX6-LABEL: name: gather4_c_b_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_c_b_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_b_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %s, float %t, float %clamp) {
|
||||
; GFX6-LABEL: name: gather4_b_cl_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_b_cl_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_b_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %zcompare, float %s, float %t, float %clamp) {
|
||||
; GFX6-LABEL: name: gather4_c_b_cl_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX6: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<6 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.o.2d), 1, [[BUILD_VECTOR2]](<6 x s32>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_c_b_cl_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %zcompare, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
|
||||
; GFX6-LABEL: name: gather4_l_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_l_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, float %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
|
||||
; GFX6-LABEL: name: gather4_c_l_o_2d
|
||||
; GFX6: bb.1.main_body:
|
||||
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX6: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
|
||||
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX6: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX6: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX6: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX6: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: gather4_c_l_o_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, float %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_lz_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t) {
; GFX6-LABEL: name: gather4_lz_o_2d
; GFX6: bb.1.main_body:
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.o.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX6: $vgpr0 = COPY [[UV]](s32)
; GFX6: $vgpr1 = COPY [[UV1]](s32)
; GFX6: $vgpr2 = COPY [[UV2]](s32)
; GFX6: $vgpr3 = COPY [[UV3]](s32)
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
; GFX10-LABEL: name: gather4_lz_o_2d
; GFX10: bb.1.main_body:
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10: $vgpr0 = COPY [[UV]](s32)
; GFX10: $vgpr1 = COPY [[UV1]](s32)
; GFX10: $vgpr2 = COPY [[UV2]](s32)
; GFX10: $vgpr3 = COPY [[UV3]](s32)
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.lz.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @gather4_c_lz_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t) {
; GFX6-LABEL: name: gather4_c_lz_o_2d
; GFX6: bb.1.main_body:
; GFX6: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX6: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX6: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX6: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
; GFX6: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX6: $vgpr0 = COPY [[UV]](s32)
; GFX6: $vgpr1 = COPY [[UV1]](s32)
; GFX6: $vgpr2 = COPY [[UV2]](s32)
; GFX6: $vgpr3 = COPY [[UV3]](s32)
; GFX6: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
; GFX10-LABEL: name: gather4_c_lz_o_2d
; GFX10: bb.1.main_body:
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10: [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
; GFX10: $vgpr0 = COPY [[UV]](s32)
; GFX10: $vgpr1 = COPY [[UV1]](s32)
; GFX10: $vgpr2 = COPY [[UV2]](s32)
; GFX10: $vgpr3 = COPY [[UV3]](s32)
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

declare <4 x float> @llvm.amdgcn.image.gather4.o.2d.v4f32.f32(i32 immarg, i32, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.cl.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.b.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.b.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.lz.o.2d.v4f32.f32(i32 immarg, i32, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind readonly }