[WebAssembly] Make returns variadic

Summary:
This is necessary and sufficient to get simple cases of multiple return
values working with multivalue enabled. More complex cases will require
block and loop signatures to be generalized so that they can also be
type indices.
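
As a rough illustration of what this enables (not code from this patch; MBB,
InsertPt, DL, TII, Reg0 and Reg1 are placeholders), a single variadic RETURN
can now carry all results of a function at the MachineInstr level:

  // Sketch: a function returning { i32, f64 } ends in one RETURN with two
  // stackified operands instead of needing a typed RETURN_* pseudo per value.
  BuildMI(MBB, InsertPt, DL, TII.get(WebAssembly::RETURN))
      .addReg(Reg0)  // first result (i32)
      .addReg(Reg1); // second result (f64)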

Reviewers: aheejin, dschuff

Subscribers: sbc100, jgravelle-google, hiraditya, sunfish, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D68684

llvm-svn: 374235
Author: Thomas Lively
Date:   2019-10-09 21:42:08 +00:00
commit 00f9e5aa76
parent ffb26d9c78
20 changed files with 106 additions and 200 deletions

@@ -52,7 +52,9 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
// Print any additional variadic operands.
const MCInstrDesc &Desc = MII.get(MI->getOpcode());
if (Desc.isVariadic())
if (Desc.isVariadic()) {
if (Desc.getNumOperands() == 0 && MI->getNumOperands() > 0)
OS << "\t";
for (auto I = Desc.getNumOperands(), E = MI->getNumOperands(); I < E; ++I) {
// FIXME: For CALL_INDIRECT_VOID, don't print a leading comma, because
// we have an extra flags operand which is not currently printed, for
@@ -63,6 +65,7 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
OS << ", ";
printOperand(MI, I, OS);
}
}
// Print any added annotation.
printAnnotation(OS, Annot);
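
Pieced together from the two hunks above (the extracted diff drops its +/-
markers), the variadic-operand printing now reads roughly as follows; only the
outer braces and the leading-tab check are new, and the comma special-casing
between the two hunks is elided:

  if (Desc.isVariadic()) {
    if (Desc.getNumOperands() == 0 && MI->getNumOperands() > 0)
      OS << "\t";
    for (auto I = Desc.getNumOperands(), E = MI->getNumOperands(); I < E; ++I) {
      // ... CALL_INDIRECT comma handling elided (between the two hunks) ...
      OS << ", ";
      printOperand(MI, I, OS);
    }
  }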

@@ -332,43 +332,15 @@ void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// These represent values which are live into the function entry, so there's
// no instruction to emit.
break;
case WebAssembly::FALLTHROUGH_RETURN_I32:
case WebAssembly::FALLTHROUGH_RETURN_I32_S:
case WebAssembly::FALLTHROUGH_RETURN_I64:
case WebAssembly::FALLTHROUGH_RETURN_I64_S:
case WebAssembly::FALLTHROUGH_RETURN_F32:
case WebAssembly::FALLTHROUGH_RETURN_F32_S:
case WebAssembly::FALLTHROUGH_RETURN_F64:
case WebAssembly::FALLTHROUGH_RETURN_F64_S:
case WebAssembly::FALLTHROUGH_RETURN_v16i8:
case WebAssembly::FALLTHROUGH_RETURN_v16i8_S:
case WebAssembly::FALLTHROUGH_RETURN_v8i16:
case WebAssembly::FALLTHROUGH_RETURN_v8i16_S:
case WebAssembly::FALLTHROUGH_RETURN_v4i32:
case WebAssembly::FALLTHROUGH_RETURN_v4i32_S:
case WebAssembly::FALLTHROUGH_RETURN_v2i64:
case WebAssembly::FALLTHROUGH_RETURN_v2i64_S:
case WebAssembly::FALLTHROUGH_RETURN_v4f32:
case WebAssembly::FALLTHROUGH_RETURN_v4f32_S:
case WebAssembly::FALLTHROUGH_RETURN_v2f64:
case WebAssembly::FALLTHROUGH_RETURN_v2f64_S: {
case WebAssembly::FALLTHROUGH_RETURN: {
// These instructions represent the implicit return at the end of a
// function body. Always pops one value off the stack.
// function body.
if (isVerbose()) {
OutStreamer->AddComment("fallthrough-return-value");
OutStreamer->AddComment("fallthrough-return");
OutStreamer->AddBlankLine();
}
break;
}
case WebAssembly::FALLTHROUGH_RETURN_VOID:
case WebAssembly::FALLTHROUGH_RETURN_VOID_S:
// This instruction represents the implicit return at the end of a
// function body with no return value.
if (isVerbose()) {
OutStreamer->AddComment("fallthrough-return-void");
OutStreamer->AddBlankLine();
}
break;
case WebAssembly::COMPILER_FENCE:
// This is a compiler barrier that prevents instruction reordering during
// backend compilation, and should not be emitted.
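
With the per-type pseudos gone, the fallthrough-return handling in
EmitInstruction collapses to the single case below (pieced together from the
hunk above):

  case WebAssembly::FALLTHROUGH_RETURN: {
    // These instructions represent the implicit return at the end of a
    // function body.
    if (isVerbose()) {
      OutStreamer->AddComment("fallthrough-return");
      OutStreamer->AddBlankLine();
    }
    break;
  }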

@@ -1227,11 +1227,11 @@ getDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
/// checks for such cases and fixes up the signatures.
void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
const auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
assert(MFI.getResults().size() <= 1);
if (MFI.getResults().empty())
return;
// TODO: Generalize from value types to function types for multivalue
WebAssembly::ExprType RetType;
switch (MFI.getResults().front().SimpleTy) {
case MVT::i32:
@@ -1266,10 +1266,14 @@ void WebAssemblyCFGStackify::fixEndsAtEndOfFunction(MachineFunction &MF) {
if (MI.isPosition() || MI.isDebugInstr())
continue;
if (MI.getOpcode() == WebAssembly::END_BLOCK) {
if (MFI.getResults().size() > 1)
report_fatal_error("Multivalue block signatures not implemented yet");
EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType));
continue;
}
if (MI.getOpcode() == WebAssembly::END_LOOP) {
if (MFI.getResults().size() > 1)
report_fatal_error("Multivalue loop signatures not implemented yet");
EndToBegin[&MI]->getOperand(0).setImm(int32_t(RetType));
continue;
}
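
The TODO above is the other half of the work called out in the commit summary:
in the binary format a block type is either a single value type or an index
into the type section, so a generalized signature operand has to distinguish
the two. A purely hypothetical sketch of such a representation (not part of
this patch):

  // Hypothetical representation, for illustration only: a block/loop signature
  // that is either an inline value type (the current ExprType immediate) or a
  // multivalue function-type index.
  struct BlockSignature {
    bool IsTypeIndex = false;    // false: empty/single result as an ExprType
    WebAssembly::ExprType Type;  // used when IsTypeIndex is false
    unsigned TypeIndex = 0;      // used when IsTypeIndex is true (multivalue)
  };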

@@ -1302,51 +1302,33 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) {
if (Ret->getNumOperands() == 0) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::RETURN_VOID));
TII.get(WebAssembly::RETURN));
return true;
}
// TODO: support multiple return in FastISel
if (Ret->getNumOperands() > 1)
return false;
Value *RV = Ret->getOperand(0);
if (!Subtarget->hasSIMD128() && RV->getType()->isVectorTy())
return false;
unsigned Opc;
switch (getSimpleType(RV->getType())) {
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
Opc = WebAssembly::RETURN_I32;
break;
case MVT::i64:
Opc = WebAssembly::RETURN_I64;
break;
case MVT::f32:
Opc = WebAssembly::RETURN_F32;
break;
case MVT::f64:
Opc = WebAssembly::RETURN_F64;
break;
case MVT::v16i8:
Opc = WebAssembly::RETURN_v16i8;
break;
case MVT::v8i16:
Opc = WebAssembly::RETURN_v8i16;
break;
case MVT::v4i32:
Opc = WebAssembly::RETURN_v4i32;
break;
case MVT::v2i64:
Opc = WebAssembly::RETURN_v2i64;
break;
case MVT::v4f32:
Opc = WebAssembly::RETURN_v4f32;
break;
case MVT::v2f64:
Opc = WebAssembly::RETURN_v2f64;
break;
case MVT::exnref:
Opc = WebAssembly::RETURN_EXNREF;
break;
default:
return false;
@@ -1363,7 +1345,9 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) {
if (Reg == 0)
return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)).addReg(Reg);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::RETURN))
.addReg(Reg);
return true;
}
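
Pieced together from the two hunks above (the extracted diff drops its +/-
markers), the fast-isel return path now looks roughly like this; the type
switch in the middle keeps only its filtering role and is elided:

  if (Ret->getNumOperands() == 0) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(WebAssembly::RETURN));
    return true;
  }

  // TODO: support multiple return in FastISel
  if (Ret->getNumOperands() > 1)
    return false;

  Value *RV = Ret->getOperand(0);
  if (!Subtarget->hasSIMD128() && RV->getType()->isVectorTy())
    return false;
  // ... switch over getSimpleType(RV->getType()) rejecting unsupported types ...

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::RETURN))
      .addReg(Reg);
  return true;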

@@ -852,8 +852,8 @@ bool WebAssemblyTargetLowering::CanLowerReturn(
CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext & /*Context*/) const {
// WebAssembly can't currently handle returning tuples.
return Outs.size() <= 1;
// WebAssembly can only handle returning tuples with multivalue enabled
return Subtarget->hasMultivalue() || Outs.size() <= 1;
}
SDValue WebAssemblyTargetLowering::LowerReturn(
@@ -861,7 +861,8 @@ SDValue WebAssemblyTargetLowering::LowerReturn(
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const {
assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
       "MVP WebAssembly can only return up to one value");
if (!callingConvSupported(CallConv))
fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

@@ -84,49 +84,19 @@ let isTerminator = 1, isBarrier = 1 in
defm END_FUNCTION : NRI<(outs), (ins), [], "end_function", 0x0b>;
} // Uses = [VALUE_STACK], Defs = [VALUE_STACK]
multiclass RETURN<WebAssemblyRegClass vt> {
defm RETURN_#vt : I<(outs), (ins vt:$val), (outs), (ins),
[(WebAssemblyreturn vt:$val)],
"return \t$val", "return", 0x0f>;
// Equivalent to RETURN_#vt, for use at the end of a function when wasm
// semantics return by falling off the end of the block.
let isCodeGenOnly = 1 in
defm FALLTHROUGH_RETURN_#vt : I<(outs), (ins vt:$val), (outs), (ins), []>;
}
multiclass SIMD_RETURN<ValueType vt> {
defm RETURN_#vt : I<(outs), (ins V128:$val), (outs), (ins),
[(WebAssemblyreturn (vt V128:$val))],
"return \t$val", "return", 0x0f>,
Requires<[HasSIMD128]>;
// Equivalent to RETURN_#vt, for use at the end of a function when wasm
// semantics return by falling off the end of the block.
let isCodeGenOnly = 1 in
defm FALLTHROUGH_RETURN_#vt : I<(outs), (ins V128:$val), (outs), (ins),
[]>,
Requires<[HasSIMD128]>;
}
let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
let isReturn = 1 in {
defm "": RETURN<I32>;
defm "": RETURN<I64>;
defm "": RETURN<F32>;
defm "": RETURN<F64>;
defm "": RETURN<EXNREF>;
defm "": SIMD_RETURN<v16i8>;
defm "": SIMD_RETURN<v8i16>;
defm "": SIMD_RETURN<v4i32>;
defm "": SIMD_RETURN<v2i64>;
defm "": SIMD_RETURN<v4f32>;
defm "": SIMD_RETURN<v2f64>;
defm RETURN_VOID : NRI<(outs), (ins), [(WebAssemblyreturn)], "return", 0x0f>;
defm RETURN : I<(outs), (ins variable_ops), (outs), (ins),
[(WebAssemblyreturn)],
"return", "return", 0x0f>;
// Equivalent to RETURN, for use at the end of a function when wasm
// semantics return by falling off the end of the block.
let isCodeGenOnly = 1 in
defm FALLTHROUGH_RETURN : I<(outs), (ins variable_ops), (outs), (ins), []>;
// This is to RETURN_VOID what FALLTHROUGH_RETURN_#vt is to RETURN_#vt.
let isCodeGenOnly = 1 in
defm FALLTHROUGH_RETURN_VOID : NRI<(outs), (ins), []>;
} // isReturn = 1
defm UNREACHABLE : NRI<(outs), (ins), [(trap)], "unreachable", 0x00>;

@@ -106,7 +106,8 @@ def WebAssemblybr_table : SDNode<"WebAssemblyISD::BR_TABLE",
def WebAssemblyargument : SDNode<"WebAssemblyISD::ARGUMENT",
SDT_WebAssemblyArgument>;
def WebAssemblyreturn : SDNode<"WebAssemblyISD::RETURN",
SDT_WebAssemblyReturn, [SDNPHasChain]>;
SDT_WebAssemblyReturn,
[SDNPHasChain, SDNPVariadic]>;
def WebAssemblywrapper : SDNode<"WebAssemblyISD::Wrapper",
SDT_WebAssemblyWrapper>;
def WebAssemblywrapperPIC : SDNode<"WebAssemblyISD::WrapperPIC",
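
Adding SDNPVariadic lets a WebAssemblyreturn node carry an arbitrary number of
value operands after its chain. LowerReturn's body is not part of this diff; a
sketch of how such a node is typically built, assuming the usual Chain and
OutVals names from LowerReturn:

  SmallVector<SDValue, 4> RetOps(1, Chain);       // chain operand first
  RetOps.append(OutVals.begin(), OutVals.end());  // then every return value
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
  return Chain;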

@@ -49,10 +49,12 @@ void llvm::computeSignatureVTs(const FunctionType *Ty, const Function &F,
computeLegalValueVTs(F, TM, Ty->getReturnType(), Results);
MVT PtrVT = MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits());
if (Results.size() > 1) {
// WebAssembly currently can't lower returns of multiple values without
// demoting to sret (see WebAssemblyTargetLowering::CanLowerReturn). So
// replace multiple return values with a pointer parameter.
if (Results.size() > 1 &&
!TM.getSubtarget<WebAssemblySubtarget>(F).hasMultivalue()) {
// WebAssembly can't lower returns of multiple values without demoting to
// sret unless multivalue is enabled (see
// WebAssemblyTargetLowering::CanLowerReturn). So replace multiple return
// values with a pointer parameter.
Results.clear();
Params.push_back(PtrVT);
}
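
A concrete example of the two paths, for a hypothetical function
{ i32, i64 } @f(float) on wasm32 (so PtrVT is i32); the parameter handling that
follows this block is unchanged and not shown in the hunk:

  // With multivalue:    Results = {MVT::i32, MVT::i64}, Params = {MVT::f32}
  //                     => signature (f32) -> (i32, i64)
  // Without multivalue: Results = {}, Params = {MVT::i32 /*sret ptr*/, MVT::f32}
  //                     => signature (i32, f32) -> ()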

@@ -75,9 +75,7 @@ static bool maybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
const MachineFunction &MF,
WebAssemblyFunctionInfo &MFI,
MachineRegisterInfo &MRI,
const WebAssemblyInstrInfo &TII,
unsigned FallthroughOpc,
unsigned CopyLocalOpc) {
const WebAssemblyInstrInfo &TII) {
if (DisableWebAssemblyFallthroughReturnOpt)
return false;
if (&MBB != &MF.back())
@@ -90,13 +88,36 @@ static bool maybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
if (&MI != &*End)
return false;
if (FallthroughOpc != WebAssembly::FALLTHROUGH_RETURN_VOID) {
// If the operand isn't stackified, insert a COPY to read the operand and
// stackify it.
MachineOperand &MO = MI.getOperand(0);
for (auto &MO : MI.explicit_operands()) {
// If the operand isn't stackified, insert a COPY to read the operands and
// stackify them.
Register Reg = MO.getReg();
if (!MFI.isVRegStackified(Reg)) {
Register NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
unsigned CopyLocalOpc;
const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
switch (RegClass->getID()) {
case WebAssembly::I32RegClassID:
CopyLocalOpc = WebAssembly::COPY_I32;
break;
case WebAssembly::I64RegClassID:
CopyLocalOpc = WebAssembly::COPY_I64;
break;
case WebAssembly::F32RegClassID:
CopyLocalOpc = WebAssembly::COPY_F32;
break;
case WebAssembly::F64RegClassID:
CopyLocalOpc = WebAssembly::COPY_F64;
break;
case WebAssembly::V128RegClassID:
CopyLocalOpc = WebAssembly::COPY_V128;
break;
case WebAssembly::EXNREFRegClassID:
CopyLocalOpc = WebAssembly::COPY_EXNREF;
break;
default:
llvm_unreachable("Unexpected register class for return operand");
}
Register NewReg = MRI.createVirtualRegister(RegClass);
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(CopyLocalOpc), NewReg)
.addReg(Reg);
MO.setReg(NewReg);
@@ -104,8 +125,7 @@ static bool maybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
}
}
// Rewrite the return.
MI.setDesc(TII.get(FallthroughOpc));
MI.setDesc(TII.get(WebAssembly::FALLTHROUGH_RETURN));
return true;
}
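
Pieced together from the hunks above (the extracted diff drops its +/-
markers), the rewritten helper now walks every explicit operand of the return
and stackifies each one that is not already stackified, picking the COPY opcode
from the operand's register class:

  for (MachineOperand &MO : MI.explicit_operands()) {
    // If the operand isn't stackified, insert a COPY to read the operand and
    // stackify it.
    Register Reg = MO.getReg();
    if (!MFI.isVRegStackified(Reg)) {
      const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
      unsigned CopyLocalOpc = /* switch on RegClass->getID(), as above */;
      Register NewReg = MRI.createVirtualRegister(RegClass);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(CopyLocalOpc), NewReg)
          .addReg(Reg);
      MO.setReg(NewReg);
      // stackification bookkeeping for NewReg is elided in the hunk
    }
  }

  // Rewrite the return.
  MI.setDesc(TII.get(WebAssembly::FALLTHROUGH_RETURN));
  return true;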
@@ -157,60 +177,8 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
break;
}
// Optimize away an explicit void return at the end of the function.
case WebAssembly::RETURN_I32:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I32,
WebAssembly::COPY_I32);
break;
case WebAssembly::RETURN_I64:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I64,
WebAssembly::COPY_I64);
break;
case WebAssembly::RETURN_F32:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F32,
WebAssembly::COPY_F32);
break;
case WebAssembly::RETURN_F64:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F64,
WebAssembly::COPY_F64);
break;
case WebAssembly::RETURN_v16i8:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v16i8,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v8i16:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v8i16,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v4i32:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4i32,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v2i64:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v2i64,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v4f32:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4f32,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_v2f64:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v2f64,
WebAssembly::COPY_V128);
break;
case WebAssembly::RETURN_VOID:
Changed |= maybeRewriteToFallthrough(
MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_VOID,
WebAssembly::INSTRUCTION_LIST_END);
case WebAssembly::RETURN:
Changed |= maybeRewriteToFallthrough(MI, MBB, MF, MFI, MRI, TII);
break;
}

@@ -9,5 +9,5 @@ body: |
liveins: $arguments
%0:i32 = CONST_I32 0, implicit-def dead $arguments
; CHECK: %0:i32 = CONST_I32 0, implicit-def dead $arguments
RETURN_VOID implicit-def dead $arguments
RETURN implicit-def dead $arguments
...

@@ -39,7 +39,7 @@ body: |
COMPILER_FENCE implicit-def $arguments
%2:i32 = ADD_I32 %0:i32, %0:i32, implicit-def $arguments
CALL_VOID @foo, %2:i32, %1:i32, implicit-def $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
@@ -63,6 +63,5 @@ body: |
ATOMIC_FENCE 0, implicit-def $arguments
%2:i32 = ADD_I32 %0:i32, %0:i32, implicit-def $arguments
CALL_VOID @foo, %2:i32, %1:i32, implicit-def $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...

@@ -42,5 +42,5 @@ body: |
bb.2:
; predecessors: %bb.0, %bb.1
RETURN_VOID implicit-def dead $arguments
RETURN implicit-def dead $arguments
...

@@ -19,5 +19,5 @@ body: |
; CHECK-NOT: dead %{{[0-9]+}}
; CHECK: DROP_I32 killed %{{[0-9]+}}
dead %0:i32 = CONST_I32 0, implicit-def dead $arguments, implicit $sp32, implicit $sp64
RETURN_VOID implicit-def dead $arguments
RETURN implicit-def dead $arguments
...

@@ -8,5 +8,5 @@ liveins:
- { reg: '$arguments' }
body: |
bb.0:
RETURN_VOID implicit-def dead $arguments
RETURN implicit-def dead $arguments
...

@@ -7,7 +7,7 @@ define i64 @testmsxs_builtin(float %x) {
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i64.call llroundf
; CHECK-NEXT: # fallthrough-return-value
; CHECK-NEXT: # fallthrough-return
; CHECK-NEXT: end_function
entry:
%0 = tail call i64 @llvm.llround.f32(float %x)
@@ -20,7 +20,7 @@ define i64 @testmsxd_builtin(double %x) {
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: local.get 0
; CHECK-NEXT: i64.call llround
; CHECK-NEXT: # fallthrough-return-value
; CHECK-NEXT: # fallthrough-return
; CHECK-NEXT: end_function
entry:
%0 = tail call i64 @llvm.llround.f64(double %x)

@@ -9,15 +9,17 @@ target triple = "wasm32-unknown-unknown"
%pair = type { i32, i32 }
%packed_pair = type <{ i32, i32 }>
; CHECK-LABEL: sret:
; CHECK-NEXT: sret (i32, i32, i32) -> ()
define %pair @sret(%pair %p) {
; CHECK-LABEL: pair_ident:
; CHECK-NEXT: pair_ident (i32, i32) -> (i32, i32)
; CHECK-NEXT: return $0, $1{{$}}
define %pair @pair_ident(%pair %p) {
ret %pair %p
}
; CHECK-LABEL: packed_sret:
; CHECK-NEXT: packed_sret (i32, i32, i32) -> ()
define %packed_pair @packed_sret(%packed_pair %p) {
; CHECK-LABEL: packed_pair_ident:
; CHECK-NEXT: packed_pair_ident (i32, i32) -> (i32, i32)
; CHECK-NEXT: return $0, $1{{$}}
define %packed_pair @packed_pair_ident(%packed_pair %p) {
ret %packed_pair %p
}

@@ -11,7 +11,7 @@ body: |
bb.0:
%0:i32 = CONST_I32 0, implicit-def $arguments
%1:i32 = ARGUMENT_i32 0, implicit $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: argument_i64
@@ -22,7 +22,7 @@ body: |
bb.0:
%0:i32 = CONST_I32 0, implicit-def $arguments
%1:i64 = ARGUMENT_i64 0, implicit $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: argument_f32
@@ -33,7 +33,7 @@ body: |
bb.0:
%0:i32 = CONST_I32 0, implicit-def $arguments
%1:f32 = ARGUMENT_f32 0, implicit $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: argument_f64
@@ -44,7 +44,7 @@ body: |
bb.0:
%0:i32 = CONST_I32 0, implicit-def $arguments
%1:f64 = ARGUMENT_f64 0, implicit $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: argument_exnref
@@ -55,5 +55,5 @@ body: |
bb.0:
%0:i32 = CONST_I32 0, implicit-def $arguments
%1:exnref = ARGUMENT_exnref 0, implicit $arguments
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...

@@ -6,10 +6,10 @@ name: copy_i32
body: |
; CHECK-LABEL: bb.0:
; CHECK-NEXT: %0:i32 = COPY_I32 %1:i32
; CHECK-NEXT: RETURN_VOID
; CHECK-NEXT: RETURN
bb.0:
%0:i32 = COPY %1:i32
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: copy_i64
@@ -17,10 +17,10 @@ name: copy_i64
body: |
; CHECK-LABEL: bb.0:
; CHECK-NEXT: %0:i64 = COPY_I64 %1:i64
; CHECK-NEXT: RETURN_VOID
; CHECK-NEXT: RETURN
bb.0:
%0:i64 = COPY %1:i64
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: copy_f32
@@ -28,10 +28,10 @@ name: copy_f32
body: |
; CHECK-LABEL: bb.0:
; CHECK-NEXT: %0:f32 = COPY_F32 %1:f32
; CHECK-NEXT: RETURN_VOID
; CHECK-NEXT: RETURN
bb.0:
%0:f32 = COPY %1:f32
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: copy_f64
@@ -39,10 +39,10 @@ name: copy_f64
body: |
; CHECK-LABEL: bb.0:
; CHECK-NEXT: %0:f64 = COPY_F64 %1:f64
; CHECK-NEXT: RETURN_VOID
; CHECK-NEXT: RETURN
bb.0:
%0:f64 = COPY %1:f64
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: copy_v128
@@ -50,10 +50,10 @@ name: copy_v128
body: |
; CHECK-LABEL: bb.0:
; CHECK-NEXT: %0:v128 = COPY_V128 %1:v128
; CHECK-NEXT: RETURN_VOID
; CHECK-NEXT: RETURN
bb.0:
%0:v128 = COPY %1:v128
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...
---
name: copy_exnref
@@ -61,8 +61,8 @@ name: copy_exnref
body: |
; CHECK-LABEL: bb.0:
; CHECK-NEXT: %0:exnref = COPY_EXNREF %1:exnref
; CHECK-NEXT: RETURN_VOID
; CHECK-NEXT: RETURN
bb.0:
%0:exnref = COPY %1:exnref
RETURN_VOID implicit-def $arguments
RETURN implicit-def $arguments
...

@@ -60,6 +60,6 @@ body: |
bb.1:
CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
RETURN_VOID implicit-def dead $arguments
RETURN implicit-def dead $arguments
...

@@ -55,6 +55,6 @@ body: |
%1:i32 = CALL_i32 @bar, implicit-def dead $arguments, implicit $sp32, implicit $sp64
DBG_VALUE %1:i32, $noreg, !12, !DIExpression(), debug-location !15; <unknown>:357:12 line no:357
CALL_VOID @foo, %1:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64
RETURN_VOID implicit-def dead $arguments
RETURN implicit-def dead $arguments
...