[GlobalISel][AMDGPU] add legalization for G_FREEZE

Summary:
Copy the legalization rules from SelectionDAG:
- widenScalar using anyext
- narrowScalar using intermediate merges
- scalarize/fewerElements using unmerge
- moreElements using G_IMPLICIT_DEF and insert
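
As a rough illustration of the widenScalar rule, here is a minimal sketch written against the LegalizerHelper unit-test fixture that this patch extends below (the names B, MF, Copies, AInfo and DummyGISelObserver are assumed from that harness, not introduced here):

LLT S64{LLT::scalar(64)};
LLT S128{LLT::scalar(128)};
// Freeze a 64-bit value, then ask the helper to widen the freeze to 128 bits.
auto Freeze = B.buildInstr(TargetOpcode::G_FREEZE, {S64}, {Copies[0]});

AInfo Info(MF->getSubtarget());
DummyGISelObserver Observer;
LegalizerHelper Helper(*MF, Info, Observer, B);

// After this call the generic MIR looks, schematically, like:
//   %ext:_(s128)    = G_ANYEXT %src(s64)
//   %frozen:_(s128) = G_FREEZE %ext
//   %res:_(s64)     = G_TRUNC %frozen(s128)
Helper.widenScalar(*Freeze, 0, S128);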

Add G_FREEZE legalization actions to AMDGPULegalizerInfo.
Use the same legalization actions as G_IMPLICIT_DEF.

Depends on D77795.

Reviewers: dsanders, arsenm, aqjune, aditya_nandakumar, t.p.northover, lebedev.ri, paquette, aemerson

Reviewed By: arsenm

Subscribers: kzhuravl, yaxunl, dstuttard, tpr, t-tye, jvesely, nhaehnle, kerbowa, wdng, rovka, hiraditya, volkan, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D78092
Dominik Montada 2020-04-14 11:25:05 +02:00
parent c245d3e033
commit 55e3a7c6b2
5 changed files with 1278 additions and 88 deletions


@@ -215,11 +215,6 @@ public:
LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
unsigned TypeIdx, LLT NarrowTy);
/// Legalize a simple vector instruction where all operands are the same type
/// by splitting into multiple components.
LegalizeResult fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy);
/// Legalize an instruction with a vector type where each operand may have a
/// different element type. All type indexes must have the same number of
/// elements.
@@ -251,6 +246,16 @@ public:
LegalizeResult
reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
/// Legalize an instruction by reducing the operation width, either by
/// narrowing the type of the operation or by reducing the number of elements
/// of a vector.
/// The used strategy (narrow vs. fewerElements) is decided by \p NarrowTy.
/// Narrow is used if the scalar type of \p NarrowTy and \p DstTy differ,
/// fewerElements is used when the scalar type is the same but the number of
/// elements between \p NarrowTy and \p DstTy differ.
LegalizeResult reduceOperationWidth(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy);
LegalizeResult fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy);
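
To make the dispatch criterion in the comment above concrete, a small hypothetical example (the types below are illustrative only and do not come from the patch):

// How reduceOperationWidth would classify two candidate NarrowTy values
// for an instruction producing a <2 x s64> result.
const LLT DstTy   = LLT::vector(2, 64);
const LLT NarrowA = LLT::scalar(32);   // scalar type differs from s64
const LLT NarrowB = LLT::scalar(64);   // same scalar type, fewer elements
// Mirrors the check performed inside reduceOperationWidth:
bool NarrowPathA = NarrowA.getScalarType() != DstTy.getScalarType(); // true  -> narrow the scalar type
bool NarrowPathB = NarrowB.getScalarType() != DstTy.getScalarType(); // false -> split into fewer elements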


@@ -825,6 +825,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
return Legalized;
}
case TargetOpcode::G_FREEZE:
return reduceOperationWidth(MI, TypeIdx, NarrowTy);
case TargetOpcode::G_ADD: {
// FIXME: add support for when SizeOp0 isn't an exact multiple of
// NarrowSize.
@@ -1728,6 +1731,13 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
}
case TargetOpcode::G_FREEZE:
Observer.changingInstr(MI);
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
widenScalarDst(MI, WideTy);
Observer.changedInstr(MI);
return Legalized;
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
case TargetOpcode::G_MUL:
@@ -2594,80 +2604,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
return Legalized;
}
// Handles operands with different types, but all must have the same number of
// elements. There will be multiple type indexes. NarrowTy is expected to have
// the result element type.
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
assert(TypeIdx == 0 && "only one type index expected");
const unsigned Opc = MI.getOpcode();
const int NumOps = MI.getNumOperands() - 1;
const Register DstReg = MI.getOperand(0).getReg();
const unsigned Flags = MI.getFlags();
assert(NumOps <= 3 && "expected instruction with 1 result and 1-3 sources");
SmallVector<Register, 8> ExtractedRegs[3];
SmallVector<Register, 8> Parts;
unsigned NarrowElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
// Break down all the sources into NarrowTy pieces we can operate on. This may
// involve creating merges to a wider type, padded with undef.
for (int I = 0; I != NumOps; ++I) {
Register SrcReg = MI.getOperand(I + 1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
// Each operand may have its own type, but only the number of elements
// matters.
LLT OpNarrowTy = LLT::scalarOrVector(NarrowElts, SrcTy.getScalarType());
LLT GCDTy = extractGCDType(ExtractedRegs[I], SrcTy, OpNarrowTy, SrcReg);
// Build a sequence of NarrowTy pieces in ExtractedRegs for this operand.
buildLCMMergePieces(SrcTy, OpNarrowTy, GCDTy,
ExtractedRegs[I], TargetOpcode::G_ANYEXT);
}
SmallVector<Register, 8> ResultRegs;
// Input operands for each sub-instruction.
SmallVector<SrcOp, 4> InputRegs(NumOps, Register());
int NumParts = ExtractedRegs[0].size();
const LLT DstTy = MRI.getType(DstReg);
const unsigned DstSize = DstTy.getSizeInBits();
LLT DstLCMTy = getLCMType(DstTy, NarrowTy);
const unsigned NarrowSize = NarrowTy.getSizeInBits();
// We widened the source registers to satisfy merge/unmerge size
// constraints. We'll have some extra fully undef parts.
const int NumRealParts = (DstSize + NarrowSize - 1) / NarrowSize;
for (int I = 0; I != NumRealParts; ++I) {
// Emit this instruction on each of the split pieces.
for (int J = 0; J != NumOps; ++J)
InputRegs[J] = ExtractedRegs[J][I];
auto Inst = MIRBuilder.buildInstr(Opc, {NarrowTy}, InputRegs, Flags);
ResultRegs.push_back(Inst.getReg(0));
}
// Fill out the widened result with undef instead of creating instructions
// with undef inputs.
int NumUndefParts = NumParts - NumRealParts;
if (NumUndefParts != 0)
ResultRegs.append(NumUndefParts, MIRBuilder.buildUndef(NarrowTy).getReg(0));
// Extract the possibly padded result to the original result register.
buildWidenedRemergeToDst(DstReg, DstLCMTy, ResultRegs);
MI.eraseFromParent();
return Legalized;
}
// Handle splitting vector operations which need to have the same number of
// elements in each type index, but each type index may have a different element
// type.
@@ -3210,6 +3146,117 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
return Legalized;
}
LegalizerHelper::LegalizeResult
LegalizerHelper::reduceOperationWidth(MachineInstr &MI, unsigned int TypeIdx,
LLT NarrowTy) {
assert(TypeIdx == 0 && "only one type index expected");
const unsigned Opc = MI.getOpcode();
const int NumOps = MI.getNumOperands() - 1;
const Register DstReg = MI.getOperand(0).getReg();
const unsigned Flags = MI.getFlags();
const unsigned NarrowSize = NarrowTy.getSizeInBits();
const LLT NarrowScalarTy = LLT::scalar(NarrowSize);
assert(NumOps <= 3 && "expected instruction with 1 result and 1-3 sources");
// First of all check whether we are narrowing (changing the element type)
// or reducing the vector elements
const LLT DstTy = MRI.getType(DstReg);
const bool IsNarrow = NarrowTy.getScalarType() != DstTy.getScalarType();
SmallVector<Register, 8> ExtractedRegs[3];
SmallVector<Register, 8> Parts;
unsigned NarrowElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
// Break down all the sources into NarrowTy pieces we can operate on. This may
// involve creating merges to a wider type, padded with undef.
for (int I = 0; I != NumOps; ++I) {
Register SrcReg = MI.getOperand(I + 1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
// The type to narrow SrcReg to. For narrowing, this is a smaller scalar.
// For fewerElements, this is a smaller vector with the same element type.
LLT OpNarrowTy;
if (IsNarrow) {
OpNarrowTy = NarrowScalarTy;
// In case of narrowing, we need to cast vectors to scalars for this to
// work properly
// FIXME: Can we do without the bitcast here if we're narrowing?
if (SrcTy.isVector()) {
SrcTy = LLT::scalar(SrcTy.getSizeInBits());
SrcReg = MIRBuilder.buildBitcast(SrcTy, SrcReg).getReg(0);
}
} else {
OpNarrowTy = LLT::scalarOrVector(NarrowElts, SrcTy.getScalarType());
}
LLT GCDTy = extractGCDType(ExtractedRegs[I], SrcTy, OpNarrowTy, SrcReg);
// Build a sequence of NarrowTy pieces in ExtractedRegs for this operand.
buildLCMMergePieces(SrcTy, OpNarrowTy, GCDTy, ExtractedRegs[I],
TargetOpcode::G_ANYEXT);
}
SmallVector<Register, 8> ResultRegs;
// Input operands for each sub-instruction.
SmallVector<SrcOp, 4> InputRegs(NumOps, Register());
int NumParts = ExtractedRegs[0].size();
const unsigned DstSize = DstTy.getSizeInBits();
const LLT DstScalarTy = LLT::scalar(DstSize);
// Narrowing needs to use scalar types
LLT DstLCMTy, NarrowDstTy;
if (IsNarrow) {
DstLCMTy = getLCMType(DstScalarTy, NarrowScalarTy);
NarrowDstTy = NarrowScalarTy;
} else {
DstLCMTy = getLCMType(DstTy, NarrowTy);
NarrowDstTy = NarrowTy;
}
// We widened the source registers to satisfy merge/unmerge size
// constraints. We'll have some extra fully undef parts.
const int NumRealParts = (DstSize + NarrowSize - 1) / NarrowSize;
for (int I = 0; I != NumRealParts; ++I) {
// Emit this instruction on each of the split pieces.
for (int J = 0; J != NumOps; ++J)
InputRegs[J] = ExtractedRegs[J][I];
auto Inst = MIRBuilder.buildInstr(Opc, {NarrowDstTy}, InputRegs, Flags);
ResultRegs.push_back(Inst.getReg(0));
}
// Fill out the widened result with undef instead of creating instructions
// with undef inputs.
int NumUndefParts = NumParts - NumRealParts;
if (NumUndefParts != 0)
ResultRegs.append(NumUndefParts,
MIRBuilder.buildUndef(NarrowDstTy).getReg(0));
// Extract the possibly padded result. Use a scratch register if we need to do
// a final bitcast, otherwise use the original result register.
Register MergeDstReg;
if (IsNarrow && DstTy.isVector())
MergeDstReg = MRI.createGenericVirtualRegister(DstScalarTy);
else
MergeDstReg = DstReg;
buildWidenedRemergeToDst(MergeDstReg, DstLCMTy, ResultRegs);
// Recast to vector if we narrowed a vector
if (IsNarrow && DstTy.isVector())
MIRBuilder.buildBitcast(DstReg, MergeDstReg);
MI.eraseFromParent();
return Legalized;
}
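
For intuition, here is a minimal sketch of the IsNarrow path above applied to a vector, mirroring the FreezeVector case of the NarrowFreeze unit test added at the end of this patch (the fixture names B, Helper and Copies are again assumed from that harness):

LLT V2S16{LLT::vector(2, 16)};
LLT V2S32{LLT::vector(2, 32)};
auto Vec = B.buildBitcast(V2S32, Copies[0]);
auto Freeze = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vec});

// NarrowTy = <2 x s16> has a different scalar type than the <2 x s32>
// destination, so IsNarrow is true: the vector source is bitcast to s64,
// unmerged into two s32 pieces (NarrowSize is 32), each piece is frozen,
// the pieces are re-merged into s64, and the result is bitcast back to
// <2 x s32>. Compare the NarrowFreeze CHECK lines in the unit test below.
Helper.narrowScalar(*Freeze, 0, V2S16);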
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
@@ -3293,7 +3340,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
case G_FMAXIMUM:
case G_FSHL:
case G_FSHR:
return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy);
case G_FREEZE:
return reduceOperationWidth(MI, TypeIdx, NarrowTy);
case G_SHL:
case G_LSHR:
case G_ASHR:
@@ -3606,6 +3654,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
Observer.changedInstr(MI);
return Legalized;
case TargetOpcode::G_INSERT:
case TargetOpcode::G_FREEZE:
if (TypeIdx != 0)
return UnableToLegalize;
Observer.changingInstr(MI);


@@ -371,14 +371,14 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.legalFor({S32, S64, S16})
.clampScalar(0, S16, S64);
getActionDefinitionsBuilder(G_IMPLICIT_DEF)
.legalFor({S1, S32, S64, S16, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
.clampScalarOrElt(0, S32, S1024)
.legalIf(isMultiple32(0))
.widenScalarToNextPow2(0, 32)
.clampMaxNumElements(0, S32, 16);
getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
.legalFor({S1, S32, S64, S16, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
.clampScalarOrElt(0, S32, S1024)
.legalIf(isMultiple32(0))
.widenScalarToNextPow2(0, 32)
.clampMaxNumElements(0, S32, 16);
setAction({G_FRAME_INDEX, PrivatePtr}, Legal);
getActionDefinitionsBuilder(G_GLOBAL_VALUE)
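
The same builder interface generalizes to other targets. As a purely hypothetical sketch (not part of this patch), a target that only supports 32- and 64-bit scalar freezes could declare:

// Hypothetical target rules, using the same builder API as the AMDGPU change above.
getActionDefinitionsBuilder(G_FREEZE)
    .legalFor({S32, S64})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0, 32);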


@@ -0,0 +1,925 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s
# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s
---
name: test_freeze_s1
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s1
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE [[TRUNC]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FREEZE]](s1)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s1) = G_TRUNC %0
%2:_(s1) = G_FREEZE %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
name: test_freeze_s7
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s7
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[FREEZE]](s32)
; CHECK: $vgpr0 = COPY [[COPY2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s7) = G_TRUNC %0
%2:_(s7) = G_FREEZE %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
name: test_freeze_s8
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s8
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[FREEZE]](s32)
; CHECK: $vgpr0 = COPY [[COPY2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s8) = G_TRUNC %0
%2:_(s8) = G_FREEZE %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
name: test_freeze_s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s16
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FREEZE]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s16) = G_TRUNC %0
%2:_(s16) = G_FREEZE %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
name: test_freeze_s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
; CHECK: $vgpr0 = COPY [[FREEZE]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = G_FREEZE %0
$vgpr0 = COPY %1
...
---
name: test_freeze_s48
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s48
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
; CHECK: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[FREEZE]](s64)
; CHECK: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s48) = G_TRUNC %0
%2:_(s48) = G_FREEZE %1
%3:_(s64) = G_ANYEXT %2
$vgpr0_vgpr1 = COPY %3
...
---
name: test_freeze_s64
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_s65
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s65
; CHECK: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MV]](s64), [[MV1]](s64)
; CHECK: [[FREEZE:%[0-9]+]]:_(s128) = G_FREEZE [[MV2]]
; CHECK: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[FREEZE]](s128)
; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s65) = G_TRUNC %0
%2:_(s65) = G_FREEZE %1
%3:_(s96) = G_ANYEXT %2
$vgpr0_vgpr1_vgpr2 = COPY %3
...
---
name: test_freeze_s128
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s128
; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK: [[FREEZE:%[0-9]+]]:_(s128) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[FREEZE]](s128)
%0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(s128) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...
---
name: test_freeze_256
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_256
; CHECK: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK: [[FREEZE:%[0-9]+]]:_(s256) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[FREEZE]](s256)
%0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(s256) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
...
---
name: test_freeze_s448
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s448
; CHECK: [[COPY:%[0-9]+]]:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[TRUNC:%[0-9]+]]:_(s448) = G_TRUNC [[COPY]](s512)
; CHECK: [[FREEZE:%[0-9]+]]:_(s448) = G_FREEZE [[TRUNC]]
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64), [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[FREEZE]](s448)
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s512) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64), [[UV2]](s64), [[UV3]](s64), [[UV4]](s64), [[UV5]](s64), [[UV6]](s64), [[DEF]](s64)
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[MV]](s512)
%0:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(s448) = G_TRUNC %0
%2:_(s448) = G_FREEZE %1
%3:_(s512) = G_ANYEXT %2
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY %3
...
---
name: test_freeze_s512
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s512
; CHECK: [[COPY:%[0-9]+]]:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[FREEZE:%[0-9]+]]:_(s512) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[FREEZE]](s512)
%0:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(s512) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY %1
...
---
name: test_freeze_s1024
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s1024
; CHECK: [[COPY:%[0-9]+]]:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64), [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s512)
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64), [[UV2]](s64), [[UV3]](s64), [[UV4]](s64), [[UV5]](s64), [[UV6]](s64), [[UV7]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64)
; CHECK: [[FREEZE:%[0-9]+]]:_(s1024) = G_FREEZE [[MV]]
; CHECK: S_NOP 0, implicit [[FREEZE]](s1024)
%0:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(s1024) = G_ANYEXT %0
%2:_(s1024) = G_FREEZE %1
S_NOP 0, implicit %2
...
---
name: test_freeze_s1056
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s1056
; CHECK: [[COPY:%[0-9]+]]:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s512)
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; CHECK: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[UV3]](s32)
; CHECK: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV5]](s32)
; CHECK: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV6]](s32), [[UV7]](s32)
; CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV8]](s32), [[UV9]](s32)
; CHECK: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV10]](s32), [[UV11]](s32)
; CHECK: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV12]](s32), [[UV13]](s32)
; CHECK: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV14]](s32), [[UV15]](s32)
; CHECK: [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[MV8:%[0-9]+]]:_(s2112) = G_MERGE_VALUES [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s1056) = G_TRUNC [[MV8]](s2112)
; CHECK: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32), [[UV33:%[0-9]+]]:_(s32), [[UV34:%[0-9]+]]:_(s32), [[UV35:%[0-9]+]]:_(s32), [[UV36:%[0-9]+]]:_(s32), [[UV37:%[0-9]+]]:_(s32), [[UV38:%[0-9]+]]:_(s32), [[UV39:%[0-9]+]]:_(s32), [[UV40:%[0-9]+]]:_(s32), [[UV41:%[0-9]+]]:_(s32), [[UV42:%[0-9]+]]:_(s32), [[UV43:%[0-9]+]]:_(s32), [[UV44:%[0-9]+]]:_(s32), [[UV45:%[0-9]+]]:_(s32), [[UV46:%[0-9]+]]:_(s32), [[UV47:%[0-9]+]]:_(s32), [[UV48:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](s1056)
; CHECK: [[MV9:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[UV16]](s32), [[UV17]](s32), [[UV18]](s32), [[UV19]](s32), [[UV20]](s32), [[UV21]](s32), [[UV22]](s32), [[UV23]](s32), [[UV24]](s32), [[UV25]](s32), [[UV26]](s32), [[UV27]](s32), [[UV28]](s32), [[UV29]](s32), [[UV30]](s32), [[UV31]](s32), [[UV32]](s32), [[UV33]](s32), [[UV34]](s32), [[UV35]](s32), [[UV36]](s32), [[UV37]](s32), [[UV38]](s32), [[UV39]](s32), [[UV40]](s32), [[UV41]](s32), [[UV42]](s32), [[UV43]](s32), [[UV44]](s32), [[UV45]](s32), [[UV46]](s32), [[UV47]](s32)
; CHECK: [[MV10:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[UV48]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK: [[DEF2:%[0-9]+]]:_(s1024) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(s1024) = G_FREEZE [[MV9]]
; CHECK: [[FREEZE1:%[0-9]+]]:_(s1024) = G_FREEZE [[MV10]]
; CHECK: [[MV11:%[0-9]+]]:_(s33792) = G_MERGE_VALUES [[FREEZE]](s1024), [[FREEZE1]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024), [[DEF2]](s1024)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s1056) = G_TRUNC [[MV11]](s33792)
; CHECK: S_NOP 0, implicit [[TRUNC1]](s1056)
%0:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(s1056) = G_ANYEXT %0
%2:_(s1056) = G_FREEZE %1
S_NOP 0, implicit %2
...
---
name: test_freeze_s2048
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_s2048
; CHECK: [[COPY:%[0-9]+]]:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64), [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s512)
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64), [[UV2]](s64), [[UV3]](s64), [[UV4]](s64), [[UV5]](s64), [[UV6]](s64), [[UV7]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64)
; CHECK: [[MV1:%[0-9]+]]:_(s1024) = G_MERGE_VALUES [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64), [[DEF]](s64)
; CHECK: [[FREEZE:%[0-9]+]]:_(s1024) = G_FREEZE [[MV]]
; CHECK: [[FREEZE1:%[0-9]+]]:_(s1024) = G_FREEZE [[MV1]]
; CHECK: [[MV2:%[0-9]+]]:_(s2048) = G_MERGE_VALUES [[FREEZE]](s1024), [[FREEZE1]](s1024)
; CHECK: S_NOP 0, implicit [[MV2]](s2048)
%0:_(s512) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(s2048) = G_ANYEXT %0
%2:_(s2048) = G_FREEZE %1
S_NOP 0, implicit %2
...
---
name: test_freeze_v2s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v2s32
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(<2 x s32>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_v3s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v3s32
; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[FREEZE:%[0-9]+]]:_(<3 x s32>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[FREEZE]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s32>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2 = COPY %1
...
---
name: test_freeze_v4s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v4s32
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s32>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[FREEZE]](<4 x s32>)
%0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<4 x s32>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...
---
name: test_freeze_v5s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v5s32
; CHECK: [[COPY:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
; CHECK: [[FREEZE:%[0-9]+]]:_(<5 x s32>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4 = COPY [[FREEZE]](<5 x s32>)
%0:_(<5 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
%1:_(<5 x s32>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4 = COPY %1
...
---
name: test_freeze_v6s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v6s32
; CHECK: [[DEF:%[0-9]+]]:_(<6 x s32>) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(<6 x s32>) = G_FREEZE [[DEF]]
; CHECK: S_NOP 0, implicit [[FREEZE]](<6 x s32>)
%0:_(<6 x s32>) = G_IMPLICIT_DEF
%1:_(<6 x s32>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v7s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v7s32
; CHECK: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(<7 x s32>) = G_FREEZE [[DEF]]
; CHECK: S_NOP 0, implicit [[FREEZE]](<7 x s32>)
%0:_(<7 x s32>) = G_IMPLICIT_DEF
%1:_(<7 x s32>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v8s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v8s32
; CHECK: [[COPY:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK: [[FREEZE:%[0-9]+]]:_(<8 x s32>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[FREEZE]](<8 x s32>)
%0:_(<8 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(<8 x s32>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
...
---
name: test_freeze_v16s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v16s32
; CHECK: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[FREEZE:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[FREEZE]](<16 x s32>)
%0:_(<16 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(<16 x s32>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY %1
...
---
name: test_freeze_v17s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v17s32
; CHECK: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; CHECK: [[FREEZE:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[COPY]]
; CHECK: S_NOP 0, implicit [[FREEZE]](<16 x s32>)
%0:_(<16 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
%1:_(<16 x s32>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v32s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v32s32
; CHECK: [[DEF:%[0-9]+]]:_(<32 x s32>) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(<32 x s32>) = G_FREEZE [[DEF]]
; CHECK: S_NOP 0, implicit [[FREEZE]](<32 x s32>)
%0:_(<32 x s32>) = G_IMPLICIT_DEF
%1:_(<32 x s32>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v33s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v33s32
; CHECK: [[DEF:%[0-9]+]]:_(<33 x s32>) = G_IMPLICIT_DEF
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<33 x s32>)
; CHECK: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[UV3]](s32), [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32), [[UV8]](s32), [[UV9]](s32), [[UV10]](s32), [[UV11]](s32), [[UV12]](s32), [[UV13]](s32), [[UV14]](s32), [[UV15]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[UV16]](s32), [[UV17]](s32), [[UV18]](s32), [[UV19]](s32), [[UV20]](s32), [[UV21]](s32), [[UV22]](s32), [[UV23]](s32), [[UV24]](s32), [[UV25]](s32), [[UV26]](s32), [[UV27]](s32), [[UV28]](s32), [[UV29]](s32), [[UV30]](s32), [[UV31]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[UV32]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32), [[DEF1]](s32)
; CHECK: [[DEF2:%[0-9]+]]:_(<16 x s32>) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[BUILD_VECTOR]]
; CHECK: [[FREEZE1:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[BUILD_VECTOR1]]
; CHECK: [[FREEZE2:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[BUILD_VECTOR2]]
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<528 x s32>) = G_CONCAT_VECTORS [[FREEZE]](<16 x s32>), [[FREEZE1]](<16 x s32>), [[FREEZE2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>), [[DEF2]](<16 x s32>)
; CHECK: [[EXTRACT:%[0-9]+]]:_(<33 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<528 x s32>), 0
; CHECK: S_NOP 0, implicit [[EXTRACT]](<33 x s32>)
%0:_(<33 x s32>) = G_IMPLICIT_DEF
%1:_(<33 x s32>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v64s32
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v64s32
; CHECK: [[DEF:%[0-9]+]]:_(<16 x s32>) = G_IMPLICIT_DEF
; CHECK: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY [[DEF]](<16 x s32>)
; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s32>) = COPY [[DEF]](<16 x s32>)
; CHECK: [[COPY2:%[0-9]+]]:_(<16 x s32>) = COPY [[DEF]](<16 x s32>)
; CHECK: [[FREEZE:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[DEF]]
; CHECK: [[FREEZE1:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[COPY]]
; CHECK: [[FREEZE2:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[COPY1]]
; CHECK: [[FREEZE3:%[0-9]+]]:_(<16 x s32>) = G_FREEZE [[COPY2]]
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<64 x s32>) = G_CONCAT_VECTORS [[FREEZE]](<16 x s32>), [[FREEZE1]](<16 x s32>), [[FREEZE2]](<16 x s32>), [[FREEZE3]](<16 x s32>)
; CHECK: S_NOP 0, implicit [[CONCAT_VECTORS]](<64 x s32>)
%0:_(<64 x s32>) = G_IMPLICIT_DEF
%1:_(<64 x s32>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v2s1
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v2s1
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
; CHECK: [[FREEZE:%[0-9]+]]:_(<2 x s32>) = G_FREEZE [[COPY]]
; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY [[FREEZE]](<2 x s32>)
; CHECK: $vgpr0_vgpr1 = COPY [[COPY1]](<2 x s32>)
%0:_(<2 x s1>) = G_IMPLICIT_DEF
%1:_(<2 x s1>) = G_FREEZE %0
%2:_(<2 x s32>) = G_ANYEXT %1
$vgpr0_vgpr1 = COPY %2
...
---
name: test_freeze_v3s1
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v3s1
; CHECK: [[DEF:%[0-9]+]]:_(<3 x s1>) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(<4 x s1>) = G_IMPLICIT_DEF
; CHECK: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[DEF1]](<4 x s1>)
; CHECK: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[ANYEXT]], [[DEF]](<3 x s1>), 0
; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[INSERT]](<4 x s16>)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[TRUNC]](<4 x s1>)
; CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s32>) = G_FREEZE [[ANYEXT1]]
; CHECK: [[TRUNC1:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[FREEZE]](<4 x s32>)
; CHECK: [[EXTRACT:%[0-9]+]]:_(<3 x s1>) = G_EXTRACT [[TRUNC1]](<4 x s1>), 0
; CHECK: [[ANYEXT2:%[0-9]+]]:_(<3 x s32>) = G_ANYEXT [[EXTRACT]](<3 x s1>)
; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[ANYEXT2]](<3 x s32>)
%0:_(<3 x s1>) = G_IMPLICIT_DEF
%1:_(<3 x s1>) = G_FREEZE %0
%2:_(<3 x s32>) = G_ANYEXT %1
$vgpr0_vgpr1_vgpr2 = COPY %2
...
---
name: test_freeze_v2s8
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v2s8
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY [[COPY]](<2 x s32>)
; CHECK: [[FREEZE:%[0-9]+]]:_(<2 x s32>) = G_FREEZE [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[FREEZE]](<2 x s32>)
; CHECK: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_TRUNC %0
%2:_(<2 x s8>) = G_FREEZE %1
%3:_(<2 x s32>) = G_ANYEXT %2
$vgpr0_vgpr1 = COPY %3
...
---
name: test_freeze_v3s8
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v3s8
; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[TRUNC:%[0-9]+]]:_(<3 x s8>) = G_TRUNC [[COPY]](<3 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(<4 x s8>) = G_IMPLICIT_DEF
; CHECK: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[DEF]](<4 x s8>)
; CHECK: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[ANYEXT]], [[TRUNC]](<3 x s8>), 0
; CHECK: [[TRUNC1:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[INSERT]](<4 x s16>)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[TRUNC1]](<4 x s8>)
; CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s32>) = G_FREEZE [[ANYEXT1]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[FREEZE]](<4 x s32>)
; CHECK: [[EXTRACT:%[0-9]+]]:_(<3 x s8>) = G_EXTRACT [[TRUNC2]](<4 x s8>), 0
; CHECK: [[ANYEXT2:%[0-9]+]]:_(<3 x s32>) = G_ANYEXT [[EXTRACT]](<3 x s8>)
; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[ANYEXT2]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s8>) = G_TRUNC %0
%2:_(<3 x s8>) = G_FREEZE %1
%3:_(<3 x s32>) = G_ANYEXT %2
$vgpr0_vgpr1_vgpr2 = COPY %3
...
---
name: test_freeze_v2s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v2s16
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; CHECK: [[FREEZE:%[0-9]+]]:_(<2 x s16>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0 = COPY [[FREEZE]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = G_FREEZE %0
$vgpr0 = COPY %1
...
---
name: test_freeze_v3s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v3s16
; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[DEF]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
; CHECK: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR1]](<2 x s32>)
; CHECK: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s16>), [[TRUNC1]](<2 x s16>), [[DEF1]](<2 x s16>)
; CHECK: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<6 x s16>), 0
; CHECK: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
; CHECK: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT]](<3 x s16>), 0
; CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s16>) = G_FREEZE [[INSERT]]
; CHECK: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[FREEZE]](<4 x s16>), 0
; CHECK: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
; CHECK: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR2]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s16>) = G_TRUNC %0
%2:_(<3 x s16>) = G_FREEZE %1
%3:_(<3 x s32>) = G_ANYEXT %2
$vgpr0_vgpr1_vgpr2 = COPY %3
...
---
name: test_freeze_v4s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v4s16
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s16>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_v5s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v5s16
; CHECK: [[COPY:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<5 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV2]](s32), [[UV3]](s32)
; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UV4]](s32), [[DEF]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
; CHECK: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR1]](<2 x s32>)
; CHECK: [[TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR2]](<2 x s32>)
; CHECK: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<10 x s16>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s16>), [[TRUNC1]](<2 x s16>), [[TRUNC2]](<2 x s16>), [[DEF1]](<2 x s16>), [[DEF1]](<2 x s16>)
; CHECK: [[EXTRACT:%[0-9]+]]:_(<5 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<10 x s16>), 0
; CHECK: [[DEF2:%[0-9]+]]:_(<6 x s32>) = G_IMPLICIT_DEF
; CHECK: [[UV5:%[0-9]+]]:_(<2 x s32>), [[UV6:%[0-9]+]]:_(<2 x s32>), [[UV7:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[DEF2]](<6 x s32>)
; CHECK: [[TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV5]](<2 x s32>)
; CHECK: [[TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV6]](<2 x s32>)
; CHECK: [[TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV7]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[TRUNC3]](<2 x s16>), [[TRUNC4]](<2 x s16>), [[TRUNC5]](<2 x s16>)
; CHECK: [[INSERT:%[0-9]+]]:_(<6 x s16>) = G_INSERT [[CONCAT_VECTORS1]], [[EXTRACT]](<5 x s16>), 0
; CHECK: [[UV8:%[0-9]+]]:_(<2 x s16>), [[UV9:%[0-9]+]]:_(<2 x s16>), [[UV10:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<6 x s16>)
; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV8]](<2 x s16>)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV9]](<2 x s16>)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; CHECK: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV10]](<2 x s16>)
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<6 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(<6 x s32>) = G_FREEZE [[BUILD_VECTOR3]]
; CHECK: [[UV11:%[0-9]+]]:_(<2 x s32>), [[UV12:%[0-9]+]]:_(<2 x s32>), [[UV13:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[FREEZE]](<6 x s32>)
; CHECK: [[TRUNC6:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV11]](<2 x s32>)
; CHECK: [[TRUNC7:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV12]](<2 x s32>)
; CHECK: [[TRUNC8:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV13]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS2:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[TRUNC6]](<2 x s16>), [[TRUNC7]](<2 x s16>), [[TRUNC8]](<2 x s16>)
; CHECK: [[EXTRACT1:%[0-9]+]]:_(<5 x s16>) = G_EXTRACT [[CONCAT_VECTORS2]](<6 x s16>), 0
; CHECK: [[DEF3:%[0-9]+]]:_(<6 x s32>) = G_IMPLICIT_DEF
; CHECK: [[UV14:%[0-9]+]]:_(<2 x s32>), [[UV15:%[0-9]+]]:_(<2 x s32>), [[UV16:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[DEF3]](<6 x s32>)
; CHECK: [[TRUNC9:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV14]](<2 x s32>)
; CHECK: [[TRUNC10:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV15]](<2 x s32>)
; CHECK: [[TRUNC11:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV16]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS3:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[TRUNC9]](<2 x s16>), [[TRUNC10]](<2 x s16>), [[TRUNC11]](<2 x s16>)
; CHECK: [[INSERT1:%[0-9]+]]:_(<6 x s16>) = G_INSERT [[CONCAT_VECTORS3]], [[EXTRACT1]](<5 x s16>), 0
; CHECK: [[UV17:%[0-9]+]]:_(<2 x s16>), [[UV18:%[0-9]+]]:_(<2 x s16>), [[UV19:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<6 x s16>)
; CHECK: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV17]](<2 x s16>)
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
; CHECK: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV18]](<2 x s16>)
; CHECK: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
; CHECK: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV19]](<2 x s16>)
; CHECK: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST4]](s32)
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY [[BITCAST5]](s32)
; CHECK: [[BUILD_VECTOR4:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4 = COPY [[BUILD_VECTOR4]](<5 x s32>)
%0:_(<5 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
%1:_(<5 x s16>) = G_TRUNC %0
%2:_(<5 x s16>) = G_FREEZE %1
%3:_(<5 x s32>) = G_ANYEXT %2
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4 = COPY %3
...
---
name: test_freeze_v6s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v6s16
; CHECK: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; CHECK: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<6 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(<6 x s32>) = G_FREEZE [[BUILD_VECTOR]]
; CHECK: [[UV3:%[0-9]+]]:_(<2 x s32>), [[UV4:%[0-9]+]]:_(<2 x s32>), [[UV5:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[FREEZE]](<6 x s32>)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV3]](<2 x s32>)
; CHECK: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV4]](<2 x s32>)
; CHECK: [[TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV5]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s16>), [[TRUNC1]](<2 x s16>), [[TRUNC2]](<2 x s16>)
; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<6 x s16>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2 = COPY %1
...
---
name: test_freeze_v8s16
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v8s16
; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; CHECK: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
; CHECK: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(<8 x s32>) = G_FREEZE [[BUILD_VECTOR]]
; CHECK: [[UV4:%[0-9]+]]:_(<2 x s32>), [[UV5:%[0-9]+]]:_(<2 x s32>), [[UV6:%[0-9]+]]:_(<2 x s32>), [[UV7:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[FREEZE]](<8 x s32>)
; CHECK: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV4]](<2 x s32>)
; CHECK: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV5]](<2 x s32>)
; CHECK: [[TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV6]](<2 x s32>)
; CHECK: [[TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[UV7]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[TRUNC]](<2 x s16>), [[TRUNC1]](<2 x s16>), [[TRUNC2]](<2 x s16>), [[TRUNC3]](<2 x s16>)
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<8 x s16>)
%0:_(<8 x s16>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<8 x s16>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...
---
name: test_freeze_v2s64
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v2s64
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK: [[FREEZE:%[0-9]+]]:_(<2 x s64>) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[FREEZE]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = G_FREEZE %0
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...
---
name: test_freeze_v4s8
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v4s8
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s8>) = COPY $vgpr0
; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<4 x s8>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32), [[ANYEXT3]](s32)
; CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s32>) = G_FREEZE [[BUILD_VECTOR]]
; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[FREEZE]](<4 x s32>)
; CHECK: $vgpr0 = COPY [[TRUNC]](<4 x s8>)
%0:_(<4 x s8>) = COPY $vgpr0
%1:_(<4 x s8>) = G_FREEZE %0
$vgpr0 = COPY %1
...
---
name: test_freeze_p0
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(p0) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](p0)
%0:_(p0) = COPY $vgpr0_vgpr1
%1:_(p0) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_p1
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p1
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(p1) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](p1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p1) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_p2
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p2
; CHECK: [[COPY:%[0-9]+]]:_(p2) = COPY $vgpr0
; CHECK: [[FREEZE:%[0-9]+]]:_(p2) = G_FREEZE [[COPY]]
; CHECK: $vgpr0 = COPY [[FREEZE]](p2)
%0:_(p2) = COPY $vgpr0
%1:_(p2) = G_FREEZE %0
$vgpr0 = COPY %1
...
---
name: test_freeze_p3
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p3
; CHECK: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CHECK: [[FREEZE:%[0-9]+]]:_(p3) = G_FREEZE [[COPY]]
; CHECK: $vgpr0 = COPY [[FREEZE]](p3)
%0:_(p3) = COPY $vgpr0
%1:_(p3) = G_FREEZE %0
$vgpr0 = COPY %1
...
---
name: test_freeze_p4
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p4
; CHECK: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(p4) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](p4)
%0:_(p4) = COPY $vgpr0_vgpr1
%1:_(p4) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_p5
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p5
; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CHECK: [[FREEZE:%[0-9]+]]:_(p5) = G_FREEZE [[COPY]]
; CHECK: $vgpr0 = COPY [[FREEZE]](p5)
%0:_(p5) = COPY $vgpr0
%1:_(p5) = G_FREEZE %0
$vgpr0 = COPY %1
...
---
name: test_freeze_p999
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_p999
; CHECK: [[COPY:%[0-9]+]]:_(p999) = COPY $vgpr0_vgpr1
; CHECK: [[FREEZE:%[0-9]+]]:_(p999) = G_FREEZE [[COPY]]
; CHECK: $vgpr0_vgpr1 = COPY [[FREEZE]](p999)
%0:_(p999) = COPY $vgpr0_vgpr1
%1:_(p999) = G_FREEZE %0
$vgpr0_vgpr1 = COPY %1
...
---
name: test_freeze_v2s1024
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v2s1024
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s1024>) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(<2 x s1024>) = G_FREEZE [[DEF]]
; CHECK: S_NOP 0, implicit [[FREEZE]](<2 x s1024>)
%0:_(<2 x s1024>) = G_IMPLICIT_DEF
%1:_(<2 x s1024>) = G_FREEZE %0
S_NOP 0, implicit %1
...
---
name: test_freeze_v3s1024
body: |
bb.0:
; CHECK-LABEL: name: test_freeze_v3s1024
; CHECK: [[DEF:%[0-9]+]]:_(<3 x s1024>) = G_IMPLICIT_DEF
; CHECK: [[FREEZE:%[0-9]+]]:_(<3 x s1024>) = G_FREEZE [[DEF]]
; CHECK: S_NOP 0, implicit [[FREEZE]](<3 x s1024>)
%0:_(<3 x s1024>) = G_IMPLICIT_DEF
%1:_(<3 x s1024>) = G_FREEZE %0
S_NOP 0, implicit %1
...


@@ -2804,4 +2804,215 @@ TEST_F(AArch64GISelMITest, NarrowImplicitDef) {
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
// Test widening of G_FREEZE
TEST_F(AArch64GISelMITest, WidenFreeze) {
setUp();
if (!TM)
return;
DefineLegalizerInfo(A, {});
// Make sure that G_FREEZE is widened with anyext
LLT S64{LLT::scalar(64)};
LLT S128{LLT::scalar(128)};
LLT V2S32{LLT::vector(2, 32)};
LLT V2S64{LLT::vector(2, 64)};
auto Vector = B.buildBitcast(V2S32, Copies[0]);
auto FreezeScalar = B.buildInstr(TargetOpcode::G_FREEZE, {S64}, {Copies[0]});
auto FreezeVector = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector});
AInfo Info(MF->getSubtarget());
DummyGISelObserver Observer;
LegalizerHelper Helper(*MF, Info, Observer, B);
// Perform Legalization
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.widenScalar(*FreezeScalar, 0, S128));
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.widenScalar(*FreezeVector, 0, V2S64));
const auto *CheckStr = R"(
CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY
CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]]
CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[COPY]]
CHECK: [[FREEZE:%[0-9]+]]:_(s128) = G_FREEZE [[ANYEXT]]
CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[FREEZE]]
CHECK: [[ANYEXT1:%[0-9]+]]:_(<2 x s64>) = G_ANYEXT [[BITCAST]]
CHECK: [[FREEZE1:%[0-9]+]]:_(<2 x s64>) = G_FREEZE [[ANYEXT1]]
CHECK: [[TRUNC1:%[0-9]+]]:_(<2 x s32>) = G_TRUNC [[FREEZE1]]
)";
// Check
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
// Test narrowing of G_FREEZE
TEST_F(AArch64GISelMITest, NarrowFreeze) {
setUp();
if (!TM)
return;
DefineLegalizerInfo(A, {});
// Make sure that G_FREEZE is narrowed using unmerge/extract
LLT S16{LLT::scalar(16)};
LLT S32{LLT::scalar(32)};
LLT S33{LLT::scalar(33)};
LLT S64{LLT::scalar(64)};
LLT V2S16{LLT::vector(2, 16)};
LLT V2S32{LLT::vector(2, 32)};
auto Trunc = B.buildTrunc(S33, {Copies[0]});
auto Vector = B.buildBitcast(V2S32, Copies[0]);
auto FreezeScalar = B.buildInstr(TargetOpcode::G_FREEZE, {S64}, {Copies[0]});
auto FreezeOdd = B.buildInstr(TargetOpcode::G_FREEZE, {S33}, {Trunc});
auto FreezeVector = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector});
auto FreezeVector1 = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector});
AInfo Info(MF->getSubtarget());
DummyGISelObserver Observer;
LegalizerHelper Helper(*MF, Info, Observer, B);
// Perform Legalization
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.narrowScalar(*FreezeScalar, 0, S32));
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.narrowScalar(*FreezeOdd, 0, S32));
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.narrowScalar(*FreezeVector, 0, V2S16));
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.narrowScalar(*FreezeVector1, 0, S16));
const auto *CheckStr = R"(
CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY
CHECK: [[TRUNC:%[0-9]+]]:_(s33) = G_TRUNC [[COPY]]
CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]]
CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]]
CHECK: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[UV]]
CHECK: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[UV1]]
CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FREEZE]]:_(s32), [[FREEZE1]]
CHECK: (s1) = G_UNMERGE_VALUES [[TRUNC]]:_(s33)
CHECK: [[UNDEF:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF
CHECK: [[MV1:%[0-9]+]]:_(s32) = G_MERGE_VALUES
CHECK: [[MV2:%[0-9]+]]:_(s32) = G_MERGE_VALUES
CHECK: [[UNDEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
CHECK: [[FREEZE2:%[0-9]+]]:_(s32) = G_FREEZE [[MV1]]
CHECK: [[FREEZE3:%[0-9]+]]:_(s32) = G_FREEZE [[MV2]]
CHECK: [[UNDEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
CHECK: [[MV3:%[0-9]+]]:_(s1056) = G_MERGE_VALUES [[FREEZE2]]:_(s32), [[FREEZE3]]:_(s32), [[UNDEF2]]
CHECK: [[TRUNC1:%[0-9]+]]:_(s33) = G_TRUNC [[MV3]]
CHECK: [[BITCAST1:%[0-9]+]]:_(s64) = G_BITCAST [[BITCAST]]
CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST1]]
CHECK: [[FREEZE4:%[0-9]+]]:_(s32) = G_FREEZE [[UV2]]
CHECK: [[FREEZE5:%[0-9]+]]:_(s32) = G_FREEZE [[UV3]]
CHECK: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FREEZE4]]:_(s32), [[FREEZE5]]:_(s32)
CHECK: [[BITCAST2:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[MV4]]
CHECK: [[BITCAST3:%[0-9]+]]:_(s64) = G_BITCAST [[BITCAST]]
CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[BITCAST3]]
CHECK: [[FREEZE6:%[0-9]+]]:_(s16) = G_FREEZE [[UV4]]
CHECK: [[FREEZE7:%[0-9]+]]:_(s16) = G_FREEZE [[UV5]]
CHECK: [[FREEZE8:%[0-9]+]]:_(s16) = G_FREEZE [[UV6]]
CHECK: [[FREEZE9:%[0-9]+]]:_(s16) = G_FREEZE [[UV7]]
CHECK: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FREEZE6]]:_(s16), [[FREEZE7]]:_(s16), [[FREEZE8]]:_(s16), [[FREEZE9]]
CHECK: [[BITCAST3:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[MV5]]
)";
// Check
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
// Test fewer elements of G_FREEZE
TEST_F(AArch64GISelMITest, FewerElementsFreeze) {
setUp();
if (!TM)
return;
DefineLegalizerInfo(A, {});
LLT S32{LLT::scalar(32)};
LLT V2S16{LLT::vector(2, 16)};
LLT V2S32{LLT::vector(2, 32)};
LLT V4S16{LLT::vector(4, 16)};
auto Vector1 = B.buildBitcast(V2S32, Copies[0]);
auto Vector2 = B.buildBitcast(V4S16, Copies[0]);
auto FreezeVector1 = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector1});
auto FreezeVector2 = B.buildInstr(TargetOpcode::G_FREEZE, {V4S16}, {Vector2});
AInfo Info(MF->getSubtarget());
DummyGISelObserver Observer;
LegalizerHelper Helper(*MF, Info, Observer, B);
// Perform Legalization
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.fewerElementsVector(*FreezeVector1, 0, S32));
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.fewerElementsVector(*FreezeVector2, 0, V2S16));
const auto *CheckStr = R"(
CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY
CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]]
CHECK: [[BITCAST1:%[0-9]+]]:_(<4 x s16>) = G_BITCAST [[COPY]]
CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]]
CHECK: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[UV]]
CHECK: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[UV1]]
CHECK: [[MV:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FREEZE]]:_(s32), [[FREEZE1]]
CHECK: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BITCAST1]]
CHECK: [[FREEZE2:%[0-9]+]]:_(<2 x s16>) = G_FREEZE [[UV]]
CHECK: [[FREEZE3:%[0-9]+]]:_(<2 x s16>) = G_FREEZE [[UV1]]
CHECK: [[MV:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[FREEZE2]]:_(<2 x s16>), [[FREEZE3]]
)";
// Check
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
// Test more elements of G_FREEZE
TEST_F(AArch64GISelMITest, MoreElementsFreeze) {
setUp();
if (!TM)
return;
DefineLegalizerInfo(A, {});
LLT V2S32{LLT::vector(2, 32)};
LLT V4S32{LLT::vector(4, 32)};
auto Vector1 = B.buildBitcast(V2S32, Copies[0]);
auto FreezeVector1 = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector1});
AInfo Info(MF->getSubtarget());
DummyGISelObserver Observer;
LegalizerHelper Helper(*MF, Info, Observer, B);
// Perform Legalization
EXPECT_EQ(LegalizerHelper::LegalizeResult::Legalized,
Helper.moreElementsVector(*FreezeVector1, 0, V4S32));
const auto *CheckStr = R"(
CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY
CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]]
CHECK: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
CHECK: [[CV:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BITCAST]]:_(<2 x s32>), [[UNDEF]]
CHECK: [[FREEZE:%[0-9]+]]:_(<4 x s32>) = G_FREEZE [[CV]]
CHECK: [[EXTR:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[FREEZE]]:_(<4 x s32>), 0
)";
// Check
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
} // namespace