forked from OSchip/llvm-project

[AMDGPU] Remove lz and nomip combine from codegen

These combines have been moved into the IR combiner in D116042.

Differential Revision: https://reviews.llvm.org/D116116

parent 603d18033c
commit ae2f9c8be8
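
For context, the combine being relocated rewrites an image intrinsic whose level-of-detail operand is a known zero into the cheaper _lz variant. A minimal sketch of the transform in LLVM IR (the sample.l/sample.lz operand lists below are written from the intrinsic definitions, not copied from D116042, so treat them as illustrative):

; Before: an explicit lod operand that is the constant zero.
%v0 = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 15, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)

; After the IR combine from D116042: the _l opcode becomes _lz and the
; now-redundant lod operand is dropped.
%v1 = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f32(i32 15, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)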
@@ -1510,10 +1510,6 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
-  const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
-      AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
-  const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
-      AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
   unsigned IntrOpcode = Intr->BaseOpcode;
   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
@@ -1586,26 +1582,6 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
     }
   }

-  // Optimize _L to _LZ when _L is zero
-  if (LZMappingInfo) {
-    // The legalizer replaced the register with an immediate 0 if we need to
-    // change the opcode.
-    const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
-    if (Lod.isImm()) {
-      assert(Lod.getImm() == 0);
-      IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
-    }
-  }
-
-  // Optimize _mip away, when 'lod' is zero
-  if (MIPMappingInfo) {
-    const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
-    if (Lod.isImm()) {
-      assert(Lod.getImm() == 0);
-      IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
-    }
-  }
-
   // Set G16 opcode
   if (IsG16 && !IsA16) {
     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
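The two selector hunks above drop both rewrites at instruction-selection time; the legalizer hunk below removes the matching GlobalISel logic. The _mip half of the combine, expressed at the IR level where it now lives, looks like this (a hedged sketch; the operand lists follow the intrinsic declarations that appear in the deleted tests further down):

; Before: a mip load whose mip level is the constant zero.
%v0 = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)

; After: the non-mip variant, with the mip operand removed.
%v1 = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)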
@@ -4450,44 +4450,6 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(

   unsigned CorrectedNumVAddrs = Intr->NumVAddrs;

-  // Optimize _L to _LZ when _L is zero
-  if (const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
-          AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode)) {
-    const ConstantFP *ConstantLod;
-
-    if (mi_match(MI.getOperand(ArgOffset + Intr->LodIndex).getReg(), *MRI,
-                 m_GFCst(ConstantLod))) {
-      if (ConstantLod->isZero() || ConstantLod->isNegative()) {
-        // Set new opcode to _lz variant of _l, and change the intrinsic ID.
-        const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
-            AMDGPU::getImageDimIntrinsicByBaseOpcode(LZMappingInfo->LZ,
-                                                     Intr->Dim);
-
-        // The starting indexes should remain in the same place.
-        --CorrectedNumVAddrs;
-
-        MI.getOperand(NumDefs).setIntrinsicID(
-            static_cast<Intrinsic::ID>(NewImageDimIntr->Intr));
-        MI.RemoveOperand(ArgOffset + Intr->LodIndex);
-        Intr = NewImageDimIntr;
-      }
-    }
-  }
-
-  // Optimize _mip away, when 'lod' is zero
-  if (AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode)) {
-    int64_t ConstantLod;
-    if (mi_match(MI.getOperand(ArgOffset + Intr->MipIndex).getReg(), *MRI,
-                 m_ICst(ConstantLod))) {
-      if (ConstantLod == 0) {
-        // TODO: Change intrinsic opcode and remove operand instead of replacing
-        // it with 0, as the _L to _LZ handling is done above.
-        MI.getOperand(ArgOffset + Intr->MipIndex).ChangeToImmediate(0);
-        --CorrectedNumVAddrs;
-      }
-    }
-  }
-
   // Rewrite the addressing register layout before doing anything else.
   if (BaseOpcode->Gradients && !ST.hasG16() && (IsA16 != IsG16)) {
     // 16 bit gradients are supported, but are tied to the A16 control
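One detail of the deleted legalizer block worth noting: the _L to _LZ rewrite fired for any constant lod that was zero or negative (ConstantLod->isZero() || ConstantLod->isNegative()), presumably because a negative lod clamps to zero anyway. For example (illustrative IR, with the same hedged sample intrinsic signature as above):

; A negative constant lod is treated like zero, so this too becomes sample.lz:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 15, float %s, float %t, float -1.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)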
@@ -6188,10 +6188,6 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
       AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
-  const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
-      AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
-  const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
-      AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
   unsigned IntrOpcode = Intr->BaseOpcode;
   bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget);
@@ -6279,28 +6275,6 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
   unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd;
   SmallVector<SDValue, 4> VAddrs;

-  // Optimize _L to _LZ when _L is zero
-  if (LZMappingInfo) {
-    if (auto *ConstantLod = dyn_cast<ConstantFPSDNode>(
-            Op.getOperand(ArgOffset + Intr->LodIndex))) {
-      if (ConstantLod->isZero() || ConstantLod->isNegative()) {
-        IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
-        VAddrEnd--; // remove 'lod'
-      }
-    }
-  }
-
-  // Optimize _mip away, when 'lod' is zero
-  if (MIPMappingInfo) {
-    if (auto *ConstantLod = dyn_cast<ConstantSDNode>(
-            Op.getOperand(ArgOffset + Intr->MipIndex))) {
-      if (ConstantLod->isZero()) {
-        IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
-        VAddrEnd--; // remove 'mip'
-      }
-    }
-  }
-
   // Check for 16 bit addresses or derivatives and pack if true.
   MVT VAddrVT =
       Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType();
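The two test files deleted below exercised these combines end to end through llc. With the logic moved into the IR combiner, equivalent coverage belongs at the IR level; a reduced check might look like the following (a hypothetical sketch: the RUN line, function name, and CHECK pattern are illustrative, not copied from D116042):

; RUN: opt -S -mtriple=amdgcn-- -passes=instcombine %s | FileCheck %s

declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg)

define amdgpu_ps <4 x float> @load_mip_1d_lod0(<8 x i32> inreg %rsrc, i32 %s) {
  ; CHECK: call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(
  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}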
@ -1,667 +0,0 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
|
||||
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
|
||||
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i16 %s) {
|
||||
; GFX9-LABEL: name: load_mip_1d
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[DEF]](s32)
|
||||
; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX9: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX9: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX9: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX9: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: load_mip_1d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[DEF]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 15, i16 %s, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i16 %s, i16 %t) {
|
||||
; GFX9-LABEL: name: load_mip_2d
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX9: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX9: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX9: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX9: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: load_mip_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
|
||||
; GFX9-LABEL: name: load_mip_3d
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[DEF]](s32)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX9: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX9: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX9: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX9: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: load_mip_3d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[DEF]](s32)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i16 %s, i16 %t) {
|
||||
; GFX9-LABEL: name: load_mip_1darray
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX9: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX9: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX9: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX9: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: load_mip_1darray
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
|
||||
; GFX9-LABEL: name: load_mip_2darray
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[DEF]](s32)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX9: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX9: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX9: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX9: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: load_mip_2darray
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[DEF]](s32)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
|
||||
; GFX9-LABEL: name: load_mip_cube
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[DEF]](s32)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX9: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX9: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX9: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX9: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
; GFX10-LABEL: name: load_mip_cube
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY8]](s32), [[COPY9]](s32)
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[DEF]](s32)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX10: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable load (<4 x s32>) from custom "ImageResource")
|
||||
; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
|
||||
; GFX10: $vgpr0 = COPY [[UV]](s32)
|
||||
; GFX10: $vgpr1 = COPY [[UV1]](s32)
|
||||
; GFX10: $vgpr2 = COPY [[UV2]](s32)
|
||||
; GFX10: $vgpr3 = COPY [[UV3]](s32)
|
||||
; GFX10: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s) {
|
||||
; GFX9-LABEL: name: store_mip_1d
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
|
||||
; GFX9: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX9: S_ENDPGM 0
|
||||
; GFX10-LABEL: name: store_mip_1d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
|
||||
; GFX10: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX10: S_ENDPGM 0
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t) {
|
||||
; GFX9-LABEL: name: store_mip_2d
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX9: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX9: S_ENDPGM 0
|
||||
; GFX10-LABEL: name: store_mip_2d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX10: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX10: S_ENDPGM 0
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
|
||||
; GFX9-LABEL: name: store_mip_3d
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX9: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX9: S_ENDPGM 0
|
||||
; GFX10-LABEL: name: store_mip_3d
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX10: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX10: S_ENDPGM 0
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t) {
|
||||
; GFX9-LABEL: name: store_mip_1darray
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX9: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX9: S_ENDPGM 0
|
||||
; GFX10-LABEL: name: store_mip_1darray
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX10: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX10: S_ENDPGM 0
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
|
||||
; GFX9-LABEL: name: store_mip_2darray
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX9: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX9: S_ENDPGM 0
|
||||
; GFX10-LABEL: name: store_mip_2darray
|
||||
; GFX10: bb.1.main_body:
|
||||
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX10: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX10: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX10: S_ENDPGM 0
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
|
||||
; GFX9-LABEL: name: store_mip_cube
|
||||
; GFX9: bb.1.main_body:
|
||||
; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
|
||||
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
|
||||
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
|
||||
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
|
||||
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
|
||||
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
|
||||
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
|
||||
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||
; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
|
||||
; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
|
||||
; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
|
||||
; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
|
||||
; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
|
||||
; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
|
||||
; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
|
||||
; GFX9: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
|
||||
; GFX9: S_ENDPGM 0
|
; GFX10-LABEL: name: store_mip_cube
; GFX10: bb.1.main_body:
; GFX10: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
; GFX10: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
; GFX10: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; GFX10: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX10: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10: [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX10: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
; GFX10: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX10: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
; GFX10: G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0, 3 :: (dereferenceable store (<4 x s32>) into custom "ImageResource")
; GFX10: S_ENDPGM 0
main_body:
call void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1

attributes #0 = { nounwind readonly }
attributes #1 = { nounwind writeonly }

@ -1,403 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s

define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i32 %s) {
|
||||
; GFX9-LABEL: load_mip_1d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_load v[0:3], v0, s[0:7] dmask:0xf unorm
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: load_mip_1d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_load v[0:3], v0, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
|
||||
; GFX9-LABEL: load_mip_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: load_mip_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_load v[0:3], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
|
||||
; GFX9-LABEL: load_mip_3d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_load v[0:3], v[0:2], s[0:7] dmask:0xf unorm
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: load_mip_3d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_load v[0:3], v[0:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
|
||||
; GFX9-LABEL: load_mip_1darray:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm da
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: load_mip_1darray:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_load v[0:3], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D_ARRAY unorm
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
|
||||
; GFX9-LABEL: load_mip_2darray:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_load v[0:3], v[0:2], s[0:7] dmask:0xf unorm da
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: load_mip_2darray:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_load v[0:3], v[0:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY unorm
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
|
||||
; GFX9-LABEL: load_mip_cube:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_load v[0:3], v[0:2], s[0:7] dmask:0xf unorm da
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: load_mip_cube:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_load v[0:3], v[0:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_CUBE unorm
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s) {
|
||||
; GFX9-LABEL: store_mip_1d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_store v[0:3], v4, s[0:7] dmask:0xf unorm
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: store_mip_1d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_store v[0:3], v4, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm
|
||||
; GFX10-NEXT: s_endpgm
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.1d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t) {
|
||||
; GFX9-LABEL: store_mip_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_store v[0:3], v[4:5], s[0:7] dmask:0xf unorm
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: store_mip_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_store v[0:3], v[4:5], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
|
||||
; GFX10-NEXT: s_endpgm
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.2d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
|
||||
; GFX9-LABEL: store_mip_3d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_store v[0:3], v[4:6], s[0:7] dmask:0xf unorm
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: store_mip_3d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_store v[0:3], v[4:6], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm
|
||||
; GFX10-NEXT: s_endpgm
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.3d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t) {
|
||||
; GFX9-LABEL: store_mip_1darray:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_store v[0:3], v[4:5], s[0:7] dmask:0xf unorm da
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: store_mip_1darray:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_store v[0:3], v[4:5], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D_ARRAY unorm
|
||||
; GFX10-NEXT: s_endpgm
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
|
||||
; GFX9-LABEL: store_mip_2darray:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_store v[0:3], v[4:6], s[0:7] dmask:0xf unorm da
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: store_mip_2darray:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_store v[0:3], v[4:6], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY unorm
|
||||
; GFX10-NEXT: s_endpgm
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}
|
||||
|
||||
define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
|
||||
; GFX9-LABEL: store_mip_cube:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: image_store v[0:3], v[4:6], s[0:7] dmask:0xf unorm da
|
||||
; GFX9-NEXT: s_endpgm
|
||||
;
|
||||
; GFX10-LABEL: store_mip_cube:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: image_store v[0:3], v[4:6], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_CUBE unorm
|
||||
; GFX10-NEXT: s_endpgm
|
||||
main_body:
|
||||
call void @llvm.amdgcn.image.store.mip.cube.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
|
||||
ret void
|
||||
}

declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1

attributes #0 = { nounwind readonly }
attributes #1 = { nounwind writeonly }

@ -1,565 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s

define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %lod) {
|
||||
; GFX9-LABEL: sample_l_1d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: image_sample_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_l_1d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32 15, half %s, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: sample_l_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v0, v0, v2, v1
|
||||
; GFX9-NEXT: image_sample_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_l_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v0, 0xffff, v0, v1
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32 15, half %s, half %t, half -0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %lod) {
|
||||
; GFX9-LABEL: sample_c_l_1d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
|
||||
; GFX9-NEXT: s_lshl_b32 s12, s0, 16
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v1, v1, v2, s12
|
||||
; GFX9-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_c_l_1d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_lshl_b32 s12, s0, 16
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, s12
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32 15, float %zcompare, half %s, half -2.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: sample_c_l_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v1, v1, v3, v2
|
||||
; GFX9-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_c_l_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, half %s, half %lod) {
|
||||
; GFX9-LABEL: sample_l_o_1d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
|
||||
; GFX9-NEXT: s_lshl_b32 s12, s0, 16
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v1, v1, v2, s12
|
||||
; GFX9-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_l_o_1d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_lshl_b32 s12, s0, 16
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, s12
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f16(i32 15, i32 %offset, half %s, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: sample_l_o_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v1, v1, v3, v2
|
||||
; GFX9-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_l_o_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f16(i32 15, i32 %offset, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %s, half %lod) {
|
||||
; GFX9-LABEL: sample_c_l_o_1d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
|
||||
; GFX9-NEXT: s_lshl_b32 s12, s0, 16
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v2, v2, v3, s12
|
||||
; GFX9-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_c_l_o_1d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_lshl_b32 s12, s0, 16
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, s12
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f16(i32 15, i32 %offset, float %zcompare, half %s, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: sample_c_l_o_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v2, v2, v4, v3
|
||||
; GFX9-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: sample_c_l_o_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f16(i32 15, i32 %offset, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: gather4_l_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v0, v0, v2, v1
|
||||
; GFX9-NEXT: image_gather4_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: gather4_l_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v0, 0xffff, v0, v1
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_gather4_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f16(i32 15, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: gather4_c_l_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v1, v1, v3, v2
|
||||
; GFX9-NEXT: image_gather4_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: gather4_c_l_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_gather4_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: gather4_l_o_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v1, v1, v3, v2
|
||||
; GFX9-NEXT: image_gather4_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: gather4_l_o_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_gather4_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f16(i32 15, i32 %offset, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %s, half %t, half %lod) {
|
||||
; GFX9-LABEL: gather4_c_l_o_2d:
|
||||
; GFX9: ; %bb.0: ; %main_body
|
||||
; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
|
||||
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
||||
; GFX9-NEXT: s_mov_b32 s0, s2
|
||||
; GFX9-NEXT: s_mov_b32 s1, s3
|
||||
; GFX9-NEXT: s_mov_b32 s2, s4
|
||||
; GFX9-NEXT: s_mov_b32 s3, s5
|
||||
; GFX9-NEXT: s_mov_b32 s4, s6
|
||||
; GFX9-NEXT: s_mov_b32 s5, s7
|
||||
; GFX9-NEXT: s_mov_b32 s6, s8
|
||||
; GFX9-NEXT: s_mov_b32 s7, s9
|
||||
; GFX9-NEXT: s_mov_b32 s8, s10
|
||||
; GFX9-NEXT: s_mov_b32 s9, s11
|
||||
; GFX9-NEXT: s_mov_b32 s10, s12
|
||||
; GFX9-NEXT: s_mov_b32 s11, s13
|
||||
; GFX9-NEXT: v_and_or_b32 v2, v2, v4, v3
|
||||
; GFX9-NEXT: image_gather4_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf a16
|
||||
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
;
|
||||
; GFX10-LABEL: gather4_c_l_o_2d:
|
||||
; GFX10: ; %bb.0: ; %main_body
|
||||
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
||||
; GFX10-NEXT: s_mov_b32 s0, s2
|
||||
; GFX10-NEXT: s_mov_b32 s1, s3
|
||||
; GFX10-NEXT: s_mov_b32 s2, s4
|
||||
; GFX10-NEXT: s_mov_b32 s3, s5
|
||||
; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
|
||||
; GFX10-NEXT: s_mov_b32 s4, s6
|
||||
; GFX10-NEXT: s_mov_b32 s5, s7
|
||||
; GFX10-NEXT: s_mov_b32 s6, s8
|
||||
; GFX10-NEXT: s_mov_b32 s7, s9
|
||||
; GFX10-NEXT: s_mov_b32 s8, s10
|
||||
; GFX10-NEXT: s_mov_b32 s9, s11
|
||||
; GFX10-NEXT: s_mov_b32 s10, s12
|
||||
; GFX10-NEXT: s_mov_b32 s11, s13
|
||||
; GFX10-NEXT: image_gather4_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
|
||||
; GFX10-NEXT: s_waitcnt vmcnt(0)
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f16(i32 15, i32 %offset, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}

declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32 immarg, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32 immarg, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f16(i32 immarg, i32, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f16(i32 immarg, i32, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f16(i32 immarg, i32, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f16(i32 immarg, i32, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f16(i32 immarg, i32, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f16(i32 immarg, i32, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind readonly }

@ -1,293 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -o - %s | FileCheck -check-prefix=GCN %s

define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %lod) {
|
||||
; GCN-LABEL: sample_l_1d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 15, float %s, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
|
||||
; GCN-LABEL: sample_l_2d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 15, float %s, float %t, float -0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %lod) {
|
||||
; GCN-LABEL: sample_c_l_1d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f32(i32 15, float %zcompare, float %s, float -2.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
|
||||
; GCN-LABEL: sample_c_l_2d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_c_lz v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f32(i32 15, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %lod) {
|
||||
; GCN-LABEL: sample_l_o_1d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f32(i32 15, i32 %offset, float %s, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
|
||||
; GCN-LABEL: sample_l_o_2d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %lod) {
|
||||
; GCN-LABEL: sample_c_l_o_1d:
|
||||
; GCN: ; %bb.0: ; %main_body
|
||||
; GCN-NEXT: s_mov_b32 s0, s2
|
||||
; GCN-NEXT: s_mov_b32 s1, s3
|
||||
; GCN-NEXT: s_mov_b32 s2, s4
|
||||
; GCN-NEXT: s_mov_b32 s3, s5
|
||||
; GCN-NEXT: s_mov_b32 s4, s6
|
||||
; GCN-NEXT: s_mov_b32 s5, s7
|
||||
; GCN-NEXT: s_mov_b32 s6, s8
|
||||
; GCN-NEXT: s_mov_b32 s7, s9
|
||||
; GCN-NEXT: s_mov_b32 s8, s10
|
||||
; GCN-NEXT: s_mov_b32 s9, s11
|
||||
; GCN-NEXT: s_mov_b32 s10, s12
|
||||
; GCN-NEXT: s_mov_b32 s11, s13
|
||||
; GCN-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf
|
||||
; GCN-NEXT: s_waitcnt vmcnt(0)
|
||||
; GCN-NEXT: ; return to shader part epilog
|
||||
main_body:
|
||||
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
|
||||
ret <4 x float> %v
|
||||
}
|
||||
|
||||
define amdgpu_ps <4 x float> @sample_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
; GCN-LABEL: sample_c_l_o_2d:
; GCN: ; %bb.0: ; %main_body
; GCN-NEXT: s_mov_b32 s0, s2
; GCN-NEXT: s_mov_b32 s1, s3
; GCN-NEXT: s_mov_b32 s2, s4
; GCN-NEXT: s_mov_b32 s3, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s8
; GCN-NEXT: s_mov_b32 s7, s9
; GCN-NEXT: s_mov_b32 s8, s10
; GCN-NEXT: s_mov_b32 s9, s11
; GCN-NEXT: s_mov_b32 s10, s12
; GCN-NEXT: s_mov_b32 s11, s13
; GCN-NEXT: image_sample_c_lz_o v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: ; return to shader part epilog
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
; GCN-LABEL: gather4_l_2d:
; GCN: ; %bb.0: ; %main_body
; GCN-NEXT: s_mov_b32 s0, s2
; GCN-NEXT: s_mov_b32 s1, s3
; GCN-NEXT: s_mov_b32 s2, s4
; GCN-NEXT: s_mov_b32 s3, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s8
; GCN-NEXT: s_mov_b32 s7, s9
; GCN-NEXT: s_mov_b32 s8, s10
; GCN-NEXT: s_mov_b32 s9, s11
; GCN-NEXT: s_mov_b32 s10, s12
; GCN-NEXT: s_mov_b32 s11, s13
; GCN-NEXT: image_gather4_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: ; return to shader part epilog
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 15, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
; GCN-LABEL: gather4_c_l_2d:
; GCN: ; %bb.0: ; %main_body
; GCN-NEXT: s_mov_b32 s0, s2
; GCN-NEXT: s_mov_b32 s1, s3
; GCN-NEXT: s_mov_b32 s2, s4
; GCN-NEXT: s_mov_b32 s3, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s8
; GCN-NEXT: s_mov_b32 s7, s9
; GCN-NEXT: s_mov_b32 s8, s10
; GCN-NEXT: s_mov_b32 s9, s11
; GCN-NEXT: s_mov_b32 s10, s12
; GCN-NEXT: s_mov_b32 s11, s13
; GCN-NEXT: image_gather4_c_lz v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: ; return to shader part epilog
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 15, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
; GCN-LABEL: gather4_l_o_2d:
; GCN: ; %bb.0: ; %main_body
; GCN-NEXT: s_mov_b32 s0, s2
; GCN-NEXT: s_mov_b32 s1, s3
; GCN-NEXT: s_mov_b32 s2, s4
; GCN-NEXT: s_mov_b32 s3, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s8
; GCN-NEXT: s_mov_b32 s7, s9
; GCN-NEXT: s_mov_b32 s8, s10
; GCN-NEXT: s_mov_b32 s9, s11
; GCN-NEXT: s_mov_b32 s10, s12
; GCN-NEXT: s_mov_b32 s11, s13
; GCN-NEXT: image_gather4_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: ; return to shader part epilog
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
; GCN-LABEL: gather4_c_l_o_2d:
; GCN: ; %bb.0: ; %main_body
; GCN-NEXT: s_mov_b32 s0, s2
; GCN-NEXT: s_mov_b32 s1, s3
; GCN-NEXT: s_mov_b32 s2, s4
; GCN-NEXT: s_mov_b32 s3, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s8
; GCN-NEXT: s_mov_b32 s7, s9
; GCN-NEXT: s_mov_b32 s8, s10
; GCN-NEXT: s_mov_b32 s9, s11
; GCN-NEXT: s_mov_b32 s10, s12
; GCN-NEXT: s_mov_b32 s11, s13
; GCN-NEXT: image_gather4_c_lz_o v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: ; return to shader part epilog
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
ret <4 x float> %v
}

declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f32(i32 immarg, i32, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0

attributes #0 = { nounwind readonly }

@ -269,10 +269,10 @@ define amdgpu_ps void @cluster_image_load(<8 x i32> inreg %src, <8 x i32> inreg
entry:
%x1 = add i32 %x, 1
%y1 = add i32 %y, 1
%val1 = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 15, i32 %x1, i32 %y1, i32 0, <8 x i32> %src, i32 0, i32 0)
%val1 = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(i32 15, i32 %x1, i32 %y1, <8 x i32> %src, i32 0, i32 0)
%x2 = add i32 %x, 2
%y2 = add i32 %y, 2
%val2 = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 15, i32 %x2, i32 %y2, i32 0, <8 x i32> %src, i32 0, i32 0)
%val2 = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(i32 15, i32 %x2, i32 %y2, <8 x i32> %src, i32 0, i32 0)
%val = fadd fast <4 x float> %val1, %val2
call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %val, i32 15, i32 %x, i32 %y, <8 x i32> %dst, i32 0, i32 0)
ret void

@ -286,20 +286,22 @@ entry:
define amdgpu_ps void @no_cluster_image_load(<8 x i32> inreg %src1, <8 x i32> inreg %src2, <8 x i32> inreg %dst, i32 %x, i32 %y) {
; GFX9-LABEL: no_cluster_image_load:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: image_load v[2:5], v[0:1], s[0:7] dmask:0xf unorm
; GFX9-NEXT: image_load v[6:9], v[0:1], s[8:15] dmask:0xf unorm
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: image_load_mip v[3:6], v[0:2], s[0:7] dmask:0xf unorm
; GFX9-NEXT: image_load_mip v[7:10], v[0:2], s[8:15] dmask:0xf unorm
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_f32_e32 v6, v6, v10
; GFX9-NEXT: v_add_f32_e32 v5, v5, v9
; GFX9-NEXT: v_add_f32_e32 v4, v4, v8
; GFX9-NEXT: v_add_f32_e32 v3, v3, v7
; GFX9-NEXT: v_add_f32_e32 v2, v2, v6
; GFX9-NEXT: image_store v[2:5], v[0:1], s[16:23] dmask:0xf unorm
; GFX9-NEXT: image_store v[3:6], v[0:1], s[16:23] dmask:0xf unorm
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: no_cluster_image_load:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: image_load v[2:5], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT: image_load v[6:9], v[0:1], s[8:15] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT: v_mov_b32_e32 v10, 0
; GFX10-NEXT: image_load_mip v[2:5], [v0, v1, v10], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT: image_load_mip v[6:9], [v0, v1, v10], s[8:15] dmask:0xf dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_add_f32_e32 v5, v5, v9
; GFX10-NEXT: v_add_f32_e32 v4, v4, v8

@ -389,6 +391,7 @@ entry:
ret void
}

declare <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg)
declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg)
declare <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f32(i32, float, float, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg)

@ -5,6 +5,7 @@ define amdgpu_ps float @_amdgpu_ps_main() #0 {
; GCN-LABEL: _amdgpu_ps_main:
; GCN: ; %bb.0: ; %.entry
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: s_mov_b32 s1, s0
; GCN-NEXT: s_mov_b32 s2, s0
; GCN-NEXT: s_mov_b32 s3, s0

@ -14,10 +15,11 @@ define amdgpu_ps float @_amdgpu_ps_main() #0 {
; GCN-NEXT: s_mov_b32 s7, s0
; GCN-NEXT: image_sample v[0:1], v[0:1], s[0:7], s[0:3] dmask:0x3 dim:SQ_RSRC_IMG_2D
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_clause 0x2
; GCN-NEXT: s_clause 0x1
; GCN-NEXT: image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
; GCN-NEXT: image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GCN-NEXT: image_load v4, v[0:1], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: image_load_mip v4, v[2:4], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GCN-NEXT: s_clause 0x3
; GCN-NEXT: s_buffer_load_dword s24, s[0:3], 0x5c
; GCN-NEXT: s_buffer_load_dword s28, s[0:3], 0x7c

@ -44,33 +46,31 @@ define amdgpu_ps float @_amdgpu_ps_main() #0 {
; GCN-NEXT: v_sub_f32_e32 v8, s0, v1
; GCN-NEXT: v_fma_f32 v7, -s2, v6, s6
; GCN-NEXT: v_fma_f32 v5, v6, v5, 1.0
; GCN-NEXT: v_mad_f32 v10, s2, v6, v2
; GCN-NEXT: s_mov_b32 s0, 0x3c23d70a
; GCN-NEXT: v_fmac_f32_e32 v1, v6, v8
; GCN-NEXT: v_mac_f32_e32 v10, v7, v6
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v9, s10, v0
; GCN-NEXT: v_fma_f32 v0, -v0, s10, s14
; GCN-NEXT: v_mul_f32_e32 v8, s18, v2
; GCN-NEXT: v_mul_f32_e32 v3, s22, v3
; GCN-NEXT: v_fmac_f32_e32 v9, v0, v6
; GCN-NEXT: v_sub_f32_e32 v0, v1, v5
; GCN-NEXT: v_fmac_f32_e32 v5, v0, v6
; GCN-NEXT: s_waitcnt vmcnt(2)
; GCN-NEXT: v_mad_f32 v10, s2, v6, v2
; GCN-NEXT: v_mul_f32_e32 v8, s18, v2
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_mul_f32_e32 v3, s22, v3
; GCN-NEXT: v_mac_f32_e32 v10, v7, v6
; GCN-NEXT: v_mul_f32_e32 v1, v8, v6
; GCN-NEXT: v_mul_f32_e32 v7, v6, v3
; GCN-NEXT: v_fma_f32 v3, -v6, v3, v9
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_add_f32_e32 v4, v4, v10
; GCN-NEXT: v_fmac_f32_e32 v5, v0, v6
; GCN-NEXT: v_fma_f32 v0, v2, s26, -v1
; GCN-NEXT: v_fmac_f32_e32 v7, v3, v6
; GCN-NEXT: v_mul_f32_e32 v3, v4, v6
; GCN-NEXT: v_fma_f32 v4, v5, s0, 0x3ca3d70a
; GCN-NEXT: v_fmac_f32_e32 v1, v0, v6
; GCN-NEXT: v_mul_f32_e32 v0, v2, v6
; GCN-NEXT: v_mul_f32_e32 v2, v7, v4
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_add_f32_e32 v4, v4, v10
; GCN-NEXT: v_mul_f32_e32 v3, v4, v6
; GCN-NEXT: v_fma_f32 v4, v5, s0, 0x3ca3d70a
; GCN-NEXT: v_mul_f32_e32 v1, v3, v1
; GCN-NEXT: v_mul_f32_e32 v2, v7, v4
; GCN-NEXT: v_fmac_f32_e32 v1, v2, v0
; GCN-NEXT: v_max_f32_e32 v0, 0, v1
; GCN-NEXT: ; return to shader part epilog

@ -1,132 +0,0 @@
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s

; GCN-LABEL: {{^}}load_mip_1d:
; GCN-NOT: image_load_mip
; GCN: image_load
define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i32 %s) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}load_mip_2d:
; GCN-NOT: image_load_mip
; GCN: image_load
define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}load_mip_3d:
; GCN-NOT: image_load_mip
; GCN: image_load
define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}load_mip_1darray:
; GCN-NOT: image_load_mip
; GCN: image_load
define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}load_mip_2darray:
; GCN-NOT: image_load_mip
; GCN: image_load
define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}load_mip_cube:
; GCN-NOT: image_load_mip
; GCN: image_load
define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret <4 x float> %v
}


; GCN-LABEL: {{^}}store_mip_1d:
; GCN-NOT: image_store_mip
; GCN: image_store
define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s) {
main_body:
call void @llvm.amdgcn.image.store.mip.1d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

; GCN-LABEL: {{^}}store_mip_2d:
; GCN-NOT: image_store_mip
; GCN: image_store
define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t) {
main_body:
call void @llvm.amdgcn.image.store.mip.2d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

; GCN-LABEL: {{^}}store_mip_3d:
; GCN-NOT: image_store_mip
; GCN: image_store
define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
main_body:
call void @llvm.amdgcn.image.store.mip.3d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

; GCN-LABEL: {{^}}store_mip_1darray:
; GCN-NOT: image_store_mip
; GCN: image_store
define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t) {
main_body:
call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

; GCN-LABEL: {{^}}store_mip_2darray:
; GCN-NOT: image_store_mip
; GCN: image_store
define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
main_body:
call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

; GCN-LABEL: {{^}}store_mip_cube:
; GCN-NOT: image_store_mip
; GCN: image_store
define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
main_body:
call void @llvm.amdgcn.image.store.mip.cube.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
ret void
}

declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1

declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i32(<4 x float>, i32, i32, i32, <8 x i32>, i32, i32) #0
declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i32(<4 x float>, i32, i32, i32, i32, <8 x i32>, i32, i32) #0
declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i32(<4 x float>, i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #0
declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i32(<4 x float>, i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #0
declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32(<4 x float>, i32, i32, i32, i32, <8 x i32>, i32, i32) #0
declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32(<4 x float>, i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #0

attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }

@ -1,113 +0,0 @@
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s

; GCN-LABEL: {{^}}sample_l_1d:
; GCN: image_sample_lz v[0:3], v0, s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 15, float %s, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_l_2d:
; GCN: image_sample_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 15, float %s, float %t, float -0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_c_l_1d:
; GCN: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f32(i32 15, float %zcompare, float %s, float -2.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_c_l_2d:
; GCN: image_sample_c_lz v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f32(i32 15, float %zcompare, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_l_o_1d:
; GCN: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f32(i32 15, i32 %offset, float %s, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_l_o_2d:
; GCN: image_sample_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_c_l_o_1d:
; GCN: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_c_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}sample_c_l_o_2d:
; GCN: image_sample_c_lz_o v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @sample_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}gather4_l_2d:
; GCN: image_gather4_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 15, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}gather4_c_l_2d:
; GCN: image_gather4_c_lz v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 15, float %zcompare, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}gather4_l_o_2d:
; GCN: image_gather4_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

; GCN-LABEL: {{^}}gather4_c_l_o_2d:
; GCN: image_gather4_c_lz_o v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf{{$}}
define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
main_body:
%v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float %t, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 0, i32 0, i32 0)
ret <4 x float> %v
}

declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f32(i32, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f32(i32, i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f32(i32, i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f32(i32, i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f32(i32, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1

declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32, i32, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1

@ -1235,6 +1235,7 @@ define amdgpu_ps void @cbranch_kill(i32 inreg %0, float %val0, float %val1) {
; SI: ; %bb.0: ; %.entry
; SI-NEXT: s_mov_b32 s4, 0
; SI-NEXT: s_mov_b64 s[0:1], exec
; SI-NEXT: v_mov_b32_e32 v4, 0
; SI-NEXT: v_mov_b32_e32 v2, v1
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: s_mov_b32 s5, s4

@ -1244,7 +1245,7 @@ define amdgpu_ps void @cbranch_kill(i32 inreg %0, float %val0, float %val1) {
; SI-NEXT: s_mov_b32 s9, s4
; SI-NEXT: s_mov_b32 s10, s4
; SI-NEXT: s_mov_b32 s11, s4
; SI-NEXT: image_sample_lz v1, v[1:3], s[4:11], s[0:3] dmask:0x1 da
; SI-NEXT: image_sample_l v1, v[1:4], s[4:11], s[0:3] dmask:0x1 da
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v1
; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc

@ -1274,6 +1275,7 @@ define amdgpu_ps void @cbranch_kill(i32 inreg %0, float %val0, float %val1) {
;
; GFX10-WAVE64-LABEL: cbranch_kill:
; GFX10-WAVE64: ; %bb.0: ; %.entry
; GFX10-WAVE64-NEXT: v_mov_b32_e32 v2, 0
; GFX10-WAVE64-NEXT: s_mov_b32 s4, 0
; GFX10-WAVE64-NEXT: s_mov_b64 s[0:1], exec
; GFX10-WAVE64-NEXT: s_mov_b32 s5, s4

@ -1283,7 +1285,7 @@ define amdgpu_ps void @cbranch_kill(i32 inreg %0, float %val0, float %val1) {
; GFX10-WAVE64-NEXT: s_mov_b32 s9, s4
; GFX10-WAVE64-NEXT: s_mov_b32 s10, s4
; GFX10-WAVE64-NEXT: s_mov_b32 s11, s4
; GFX10-WAVE64-NEXT: image_sample_lz v1, [v1, v1, v1], s[4:11], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
; GFX10-WAVE64-NEXT: image_sample_l v1, [v1, v1, v1, v2], s[4:11], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
; GFX10-WAVE64-NEXT: s_waitcnt vmcnt(0)
; GFX10-WAVE64-NEXT: v_cmp_ge_f32_e32 vcc, 0, v1
; GFX10-WAVE64-NEXT: s_and_saveexec_b64 s[2:3], vcc

@ -1313,6 +1315,7 @@ define amdgpu_ps void @cbranch_kill(i32 inreg %0, float %val0, float %val1) {
;
; GFX10-WAVE32-LABEL: cbranch_kill:
; GFX10-WAVE32: ; %bb.0: ; %.entry
; GFX10-WAVE32-NEXT: v_mov_b32_e32 v2, 0
; GFX10-WAVE32-NEXT: s_mov_b32 s4, 0
; GFX10-WAVE32-NEXT: s_mov_b32 s0, exec_lo
; GFX10-WAVE32-NEXT: s_mov_b32 s5, s4

@ -1322,7 +1325,7 @@ define amdgpu_ps void @cbranch_kill(i32 inreg %0, float %val0, float %val1) {
; GFX10-WAVE32-NEXT: s_mov_b32 s9, s4
; GFX10-WAVE32-NEXT: s_mov_b32 s10, s4
; GFX10-WAVE32-NEXT: s_mov_b32 s11, s4
; GFX10-WAVE32-NEXT: image_sample_lz v1, [v1, v1, v1], s[4:11], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
; GFX10-WAVE32-NEXT: image_sample_l v1, [v1, v1, v1, v2], s[4:11], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
; GFX10-WAVE32-NEXT: s_waitcnt vmcnt(0)
; GFX10-WAVE32-NEXT: v_cmp_ge_f32_e32 vcc_lo, 0, v1
; GFX10-WAVE32-NEXT: s_and_saveexec_b32 s1, vcc_lo