diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 1f4256662031..3d3b504c6abd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -317,8 +317,15 @@ InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
       OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);
 
     if (OpRC) {
+      unsigned MinNumRegs = MinRCSize;
+      // Don't apply any RC size limit for IMPLICIT_DEF. Each use has a unique
+      // virtual register.
+      if (Op.isMachineOpcode() &&
+          Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
+        MinNumRegs = 0;
+
       const TargetRegisterClass *ConstrainedRC
-        = MRI->constrainRegClass(VReg, OpRC, MinRCSize);
+        = MRI->constrainRegClass(VReg, OpRC, MinNumRegs);
       if (!ConstrainedRC) {
         OpRC = TRI->getAllocatableClass(OpRC);
         assert(OpRC && "Constraints cannot be fulfilled for allocation");
diff --git a/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
new file mode 100644
index 000000000000..2aa01273d883
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/implicit-def-copy.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel | FileCheck %s
+
+; Make sure we don't create a COPY instruction for IMPLICIT_DEF.
+
+define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) #1 {
+  ; CHECK-LABEL: name: vpload_nxv8i64
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[DEF]], [[COPY2]], $v0, [[COPY]], 6 /* e64 */, 1 :: (load unknown-size from %ir.ptr, align 64)
+  ; CHECK-NEXT:   $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
+  ; CHECK-NEXT:   PseudoRET implicit $v8m8
+  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i64> %load
+}
+
+declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
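
For context (not part of the patch): a minimal standalone sketch of the behavior change, using stand-in types rather than the real LLVM API, under the assumption that MinRCSize is InstrEmitter's usual floor of 4 and that vrm8nov0 (the LMUL=8 class excluding the v0 group) holds only three register groups, which is why the old check failed and forced a COPY.

// Standalone sketch (hypothetical RegClass/constrainRegClass stand-ins, not
// the LLVM API) modeling the control flow in AddRegisterOperand above.
#include <cstdio>

struct RegClass {
  const char *Name;
  unsigned NumRegs; // number of allocatable registers in the class
};

// Stand-in for MachineRegisterInfo::constrainRegClass: succeed only when the
// constrained class keeps at least MinNumRegs registers, else return null.
const RegClass *constrainRegClass(const RegClass &RC, unsigned MinNumRegs) {
  return RC.NumRegs >= MinNumRegs ? &RC : nullptr;
}

int main() {
  const unsigned MinRCSize = 4;      // InstrEmitter's usual size floor
  RegClass VRM8NoV0{"vrm8nov0", 3};  // v8m8, v16m8, v24m8

  // Before the patch: every operand used MinRCSize, the small class failed
  // the check, and the emitter fell back to emitting a COPY to a fresh vreg.
  bool CopyBefore = constrainRegClass(VRM8NoV0, MinRCSize) == nullptr;

  // After the patch: IMPLICIT_DEF operands pass MinNumRegs = 0, so the
  // constraint succeeds and the vreg is constrained in place, with no COPY.
  bool CopyAfter = constrainRegClass(VRM8NoV0, /*MinNumRegs=*/0) == nullptr;

  std::printf("copy before patch: %d, after: %d\n", CopyBefore, CopyAfter);
  return 0;
}

Dropping the floor is safe for IMPLICIT_DEF specifically because each use gets its own virtual register, so over-constraining that vreg to a tiny class cannot pessimize any other user; the test above checks that the masked vector-load pseudo consumes the IMPLICIT_DEF directly.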