Refactor & fixup interpreter implementation of tail calls

Maybe Waffle 2024-03-15 15:08:20 +00:00 committed by Maybe Lapkin
parent 30b18d7c36
commit cda25e56c8
4 changed files with 162 additions and 70 deletions

View File

@@ -159,6 +159,13 @@ pub enum StackPopCleanup {
Root { cleanup: bool },
}
/// Return type of [`InterpCx::pop_stack_frame`].
pub struct StackPop<'tcx, Prov: Provenance> {
pub jump: StackPopJump,
pub target: StackPopCleanup,
pub destination: MPlaceTy<'tcx, Prov>,
}
/// State of a local variable including a memoized layout
#[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
@@ -803,14 +810,31 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
// First push a stack frame so we have access to the local args
self.push_new_stack_frame(instance, body, return_to_block, return_place.clone())?;
self.after_stack_frame_push(instance, body)?;
Ok(())
}
/// Creates a new stack frame, initializes it, and pushes it onto the stack.
/// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame).
fn push_new_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
return_to_block: StackPopCleanup,
return_place: MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
let locals = IndexVec::from_elem(dead_local, &body.local_decls);
// First push a stack frame so we have access to the local args
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block,
return_place: return_place.clone(),
return_place,
locals,
instance,
tracing_span: SpanGuard::new(),
@@ -819,6 +843,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let frame = M::init_frame(self, pre_frame)?;
self.stack_mut().push(frame);
Ok(())
}
/// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame).
fn after_stack_frame_push(
&mut self,
instance: ty::Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
) -> InterpResult<'tcx> {
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
for &const_ in &body.required_consts {
let c =
@@ -839,6 +872,59 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Ok(())
}
/// Pops a stack frame from the stack and returns some information about it.
///
/// This also deallocates locals, if necessary.
///
/// [`M::before_stack_pop`] should be called before calling this function.
/// [`M::after_stack_pop`] is called by this function automatically.
///
/// [`M::before_stack_pop`]: Machine::before_stack_pop
/// [`M::after_stack_pop`]: Machine::after_stack_pop
pub fn pop_stack_frame(
&mut self,
unwinding: bool,
) -> InterpResult<'tcx, StackPop<'tcx, M::Provenance>> {
let cleanup = self.cleanup_current_frame_locals()?;
let frame =
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
let target = frame.return_to_block;
let destination = frame.return_place.clone();
let jump = if cleanup {
M::after_stack_pop(self, frame, unwinding)?
} else {
StackPopJump::NoCleanup
};
Ok(StackPop { jump, target, destination })
}
/// A private helper for [`pop_stack_frame`](InterpCx::pop_stack_frame).
/// Returns whether cleanup was done.
fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> {
// Cleanup: deallocate locals.
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
// We do this while the frame is still on the stack, so errors point to the callee.
let return_to_block = self.frame().return_to_block;
let cleanup = match return_to_block {
StackPopCleanup::Goto { .. } => true,
StackPopCleanup::Root { cleanup, .. } => cleanup,
};
if cleanup {
// We need to take the locals out, since we need to mutate while iterating.
let locals = mem::take(&mut self.frame_mut().locals);
for local in &locals {
self.deallocate_local(local.value)?;
}
}
Ok(cleanup)
}
/// Jump to the given block.
#[inline]
pub fn go_to_block(&mut self, target: mir::BasicBlock) {
@@ -886,7 +972,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
/// Pops the current frame from the stack, deallocating the
/// memory for allocated locals.
/// memory for allocated locals, and jumps to an appropriate place.
///
/// If `unwinding` is `false`, then we are performing a normal return
/// from a function. In this case, we jump back into the frame of the caller,
@@ -899,7 +985,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// The cleanup block ends with a special `Resume` terminator, which will
/// cause us to continue unwinding.
#[instrument(skip(self), level = "debug")]
pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
pub(super) fn return_from_current_stack_frame(
&mut self,
unwinding: bool,
) -> InterpResult<'tcx> {
info!(
"popping stack frame ({})",
if unwinding { "during unwinding" } else { "returning from function" }
@@ -947,45 +1036,31 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Ok(())
};
// Cleanup: deallocate locals.
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
// We do this while the frame is still on the stack, so errors point to the callee.
let return_to_block = self.frame().return_to_block;
let cleanup = match return_to_block {
StackPopCleanup::Goto { .. } => true,
StackPopCleanup::Root { cleanup, .. } => cleanup,
};
if cleanup {
// We need to take the locals out, since we need to mutate while iterating.
let locals = mem::take(&mut self.frame_mut().locals);
for local in &locals {
self.deallocate_local(local.value)?;
}
}
// All right, now it is time to actually pop the frame.
// Note that its locals are gone already, but that's fine.
let frame =
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
let frame = self.pop_stack_frame(unwinding)?;
// Report error from return value copy, if any.
copy_ret_result?;
match frame.jump {
StackPopJump::Normal => {}
StackPopJump::NoJump => {
// The hook already did everything.
return Ok(());
}
StackPopJump::NoCleanup => {
// If we are not doing cleanup, also skip everything else.
if !cleanup {
assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
assert!(!unwinding, "tried to skip cleanup during unwinding");
// Skip machine hook.
return Ok(());
}
if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
// The hook already did everything.
return Ok(());
}
// Normal return, figure out where to jump.
if unwinding {
// Follow the unwind edge.
let unwind = match return_to_block {
let unwind = match frame.target {
StackPopCleanup::Goto { unwind, .. } => unwind,
StackPopCleanup::Root { .. } => {
panic!("encountered StackPopCleanup::Root when unwinding!")
@@ -995,7 +1070,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.unwind_to_block(unwind)
} else {
// Follow the normal return edge.
match return_to_block {
match frame.target {
StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
StackPopCleanup::Root { .. } => {
assert!(

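For orientation, the hunks above split the old `pop_stack_frame` into a raw `pop_stack_frame` (which deallocates locals if requested, pops the frame, and returns a `StackPop` recording the jump decision and return target) and `return_from_current_stack_frame` (which performs the actual jump). Below is a minimal standalone sketch of that control flow; all types are simplified stand-ins, not the rustc code itself (the return place and the real `Machine` hooks are omitted or faked).

```rust
// Simplified, self-contained model of the refactored pop path.
// All types are toy stand-ins for the interpreter's real ones.

#[derive(Clone, Copy, Debug)]
enum StackPopJump {
    Normal,    // follow the return/unwind edge as usual
    NoJump,    // a machine hook already performed the jump
    NoCleanup, // locals were intentionally leaked; skip everything else
}

#[derive(Clone, Copy, Debug)]
enum StackPopCleanup {
    Goto { ret: usize },    // return to this block in the caller
    Root { cleanup: bool }, // bottom-most frame; maybe leak its locals
}

struct Frame {
    return_to_block: StackPopCleanup,
    locals: Vec<String>, // stand-in for allocated locals
}

/// Mirrors the new `StackPop` return type (the return place is omitted here).
struct StackPop {
    jump: StackPopJump,
    target: StackPopCleanup,
}

struct InterpCx {
    stack: Vec<Frame>,
}

impl InterpCx {
    /// Raw pop: deallocate locals (if requested) and pop the frame,
    /// but do *not* jump back into the caller.
    fn pop_stack_frame(&mut self) -> StackPop {
        let cleanup = match self.stack.last().expect("no frame").return_to_block {
            StackPopCleanup::Goto { .. } => true,
            StackPopCleanup::Root { cleanup } => cleanup,
        };
        let mut frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
        if cleanup {
            frame.locals.clear(); // "deallocate" locals
        }
        // In the real interpreter this decision comes from `M::after_stack_pop`.
        let jump = if cleanup { StackPopJump::Normal } else { StackPopJump::NoCleanup };
        StackPop { jump, target: frame.return_to_block }
    }

    /// Full return: pop the frame *and* jump to the caller's return block.
    fn return_from_current_stack_frame(&mut self) {
        let frame = self.pop_stack_frame();
        match frame.jump {
            StackPopJump::Normal => {}
            StackPopJump::NoJump | StackPopJump::NoCleanup => return,
        }
        if let StackPopCleanup::Goto { ret } = frame.target {
            println!("jumping to caller block {ret}");
        }
    }
}

fn main() {
    let mut ecx = InterpCx {
        stack: vec![
            Frame { return_to_block: StackPopCleanup::Root { cleanup: true }, locals: vec![] },
            Frame { return_to_block: StackPopCleanup::Goto { ret: 3 }, locals: vec!["x".into()] },
        ],
    };
    // Pops the callee frame and "jumps" to block 3 of the caller.
    ecx.return_from_current_stack_frame();
}
```

The point of the split, as used by the tail-call code further below, is that a caller can now pop a frame without triggering the return-block goto.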
View File

@@ -36,6 +36,9 @@ pub enum StackPopJump {
/// Indicates that we should *not* jump to the return/unwind address, as the callback already
/// took care of everything.
NoJump,
/// Returned by [`InterpCx::pop_stack_frame`] when no cleanup should be done.
NoCleanup,
}
/// Whether this kind of memory is allowed to leak

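The `NoCleanup` variant added above lets `pop_stack_frame` report that it skipped deallocating locals, while `NoJump` means a machine callback already handled the jump. A tiny, hypothetical sketch of that hook protocol follows; the trait and names are simplified stand-ins, not the real `Machine` trait.

```rust
// Hypothetical, simplified sketch of the hook protocol: after a frame is
// popped with cleanup, the machine gets a chance to take over the jump.

#[derive(Debug, PartialEq)]
enum StackPopJump {
    Normal,    // interpreter follows the return/unwind edge
    NoJump,    // the machine hook already did everything
    NoCleanup, // locals were not deallocated; skip hook and jump
}

struct Frame; // stand-in for the interpreter's popped Frame

trait Machine {
    /// Called after a frame was popped with cleanup.
    fn after_stack_pop(_frame: Frame, _unwinding: bool) -> StackPopJump {
        StackPopJump::Normal
    }
}

/// A machine that performs the jump itself (e.g. for custom stack handling).
struct CustomMachine;
impl Machine for CustomMachine {
    fn after_stack_pop(_frame: Frame, _unwinding: bool) -> StackPopJump {
        StackPopJump::NoJump
    }
}

/// A machine that is happy with the default behaviour.
struct DefaultMachine;
impl Machine for DefaultMachine {}

/// Mimics the tail end of `pop_stack_frame`: only consult the hook if cleanup ran.
fn pop_with<M: Machine>(cleanup: bool, unwinding: bool) -> StackPopJump {
    if cleanup { M::after_stack_pop(Frame, unwinding) } else { StackPopJump::NoCleanup }
}

fn main() {
    assert_eq!(pop_with::<DefaultMachine>(true, false), StackPopJump::Normal);
    assert_eq!(pop_with::<CustomMachine>(true, false), StackPopJump::NoJump);
    assert_eq!(pop_with::<DefaultMachine>(false, false), StackPopJump::NoCleanup);
}
```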
View File

@@ -32,7 +32,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// We are unwinding and this fn has no cleanup code.
// Just go on unwinding.
trace!("unwinding: skipping frame");
self.pop_stack_frame(/* unwinding */ true)?;
self.return_from_current_stack_frame(/* unwinding */ true)?;
return Ok(true);
};
let basic_block = &self.body().basic_blocks[loc.block];

View File

@ -4,9 +4,8 @@ use either::Either;
use rustc_middle::ty::TyCtxt;
use tracing::trace;
use rustc_middle::span_bug;
use rustc_middle::{
mir,
bug, mir, span_bug,
ty::{
self,
layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout},
@@ -26,7 +25,10 @@ use super::{
InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, Scalar,
StackPopCleanup,
};
use crate::fluent_generated as fluent;
use crate::{
fluent_generated as fluent,
interpret::{eval_context::StackPop, StackPopJump},
};
/// An argument passed to a function.
#[derive(Clone, Debug)]
@@ -93,7 +95,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
use rustc_middle::mir::TerminatorKind::*;
match terminator.kind {
Return => {
self.pop_stack_frame(/* unwinding */ false)?
self.return_from_current_stack_frame(/* unwinding */ false)?
}
Goto { target } => self.go_to_block(target),
@@ -160,35 +162,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
self.eval_callee_and_args(terminator, func, args)?;
// This is the "canonical" implementation of tail calls,
// a pop of the current stack frame, followed by a normal call
// which pushes a new stack frame, with the return address from
// the popped stack frame.
//
// Note that we can't use `pop_stack_frame` as it "executes"
// the goto to the return block, but we don't want to,
// only the tail called function should return to the current
// return block.
let Some(prev_frame) = self.stack_mut().pop() else {
span_bug!(
terminator.source_info.span,
"empty stack while evaluating this tail call"
)
};
let StackPopCleanup::Goto { ret, unwind } = prev_frame.return_to_block else {
span_bug!(terminator.source_info.span, "tail call with the root stack frame")
};
self.eval_fn_call(
callee,
(fn_sig.abi, fn_abi),
&args,
with_caller_location,
&prev_frame.return_place,
ret,
unwind,
)?;
self.eval_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;
if self.frame_idx() != old_frame_idx {
span_bug!(
@@ -235,7 +209,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
trace!("unwinding: resuming from cleanup");
// By definition, a Resume terminator means
// that we're unwinding
self.pop_stack_frame(/* unwinding */ true)?;
self.return_from_current_stack_frame(/* unwinding */ true)?;
return Ok(());
}
@@ -989,6 +963,46 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
pub(crate) fn eval_fn_tail_call(
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
(caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
args: &[FnArg<'tcx, M::Provenance>],
with_caller_location: bool,
) -> InterpResult<'tcx> {
trace!("eval_fn_call: {:#?}", fn_val);
// This is the "canonical" implementation of tail calls,
// a pop of the current stack frame, followed by a normal call
// which pushes a new stack frame, with the return address from
// the popped stack frame.
//
// Note that we can't use `return_from_current_stack_frame` as it "executes"
// the goto to the return block, but we don't want that here:
// only the tail-called function should return to the current
// return block.
M::before_stack_pop(self, self.frame())?;
let StackPop { jump, target, destination } = self.pop_stack_frame(false)?;
assert_eq!(jump, StackPopJump::Normal);
let StackPopCleanup::Goto { ret, unwind } = target else {
bug!("can't tailcall as root");
};
self.eval_fn_call(
fn_val,
(caller_abi, caller_fn_abi),
args,
with_caller_location,
&destination,
ret,
unwind,
)
}
fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
// Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
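Putting the tail-call hunk in context: `eval_fn_tail_call` pops the caller's frame without executing its return goto, then performs an ordinary call whose return place and return block are taken from the popped frame, so the tail-called function returns directly to the original caller. Below is a self-contained toy illustration of that strategy; the types are hypothetical stand-ins, not the interpreter's API.

```rust
// Toy model of the tail-call strategy: pop the current frame (without
// jumping anywhere), then do a normal call that reuses the popped frame's
// return target.

#[derive(Clone, Debug)]
struct Frame {
    name: &'static str,
    /// Basic block in *this frame's caller* to return to.
    ret_block: usize,
}

#[derive(Default)]
struct Interp {
    stack: Vec<Frame>,
}

impl Interp {
    /// Ordinary call: push a frame that will return to `ret_block` in the caller.
    fn call(&mut self, callee: &'static str, ret_block: usize) {
        self.stack.push(Frame { name: callee, ret_block });
    }

    /// Tail call: the current frame is popped first, and the callee inherits
    /// its return block, so the callee returns directly to the grand-caller.
    fn tail_call(&mut self, callee: &'static str) {
        let prev = self.stack.pop().expect("tail call with empty stack");
        // The callee takes over the popped frame's return target.
        self.call(callee, prev.ret_block);
    }
}

fn main() {
    let mut interp = Interp::default();
    interp.call("main_fn", 0);
    interp.call("f", 7);   // f will return to block 7 of main_fn
    interp.tail_call("g"); // g replaces f and will also return to block 7
    assert_eq!(interp.stack.len(), 2);
    assert_eq!(interp.stack.last().unwrap().ret_block, 7);
    println!("stack: {:?}", interp.stack.iter().map(|f| f.name).collect::<Vec<_>>());
}
```

This is why the frame index check right after the call in the hunk above expects the stack depth to be unchanged: one frame is popped and exactly one is pushed.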