diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index fd1f081a0a8..6e846d721f2 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -396,9 +396,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
                source_info,
            ) {
                Ok(()) => return,
-                // Unimplemented intrinsics must have a fallback body. The fallback body is obtained
-                // by converting the `InstanceDef::Intrinsic` to an `InstanceDef::Item`.
-                Err(()) => Some(Instance::new(instance.def_id(), instance.args)),
+                Err(instance) => Some(instance),
            }
        }
        InstanceDef::DropGlue(_, None) => {
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 5e64fec76e4..210a3da2c5d 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -268,7 +268,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
-) -> Result<(), ()> {
+) -> Result<(), Instance<'tcx>> {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let instance_args = instance.args;

@@ -431,7 +431,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
-) -> Result<(), ()> {
+) -> Result<(), Instance<'tcx>> {
    assert_eq!(generic_args, instance.args);

    let usize_layout = fx.layout_of(fx.tcx.types.usize);
@@ -1229,14 +1229,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
            ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
        }

-        sym::const_allocate => {
-            intrinsic_args!(fx, args => (_size, _align); intrinsic);
-
-            // returns a null pointer at runtime.
-            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
-            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
-        }
-
        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
@@ -1257,7 +1249,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
            );
        }

-        _ => return Err(()),
+        // Unimplemented intrinsics must have a fallback body. The fallback body is obtained
+        // by converting the `InstanceDef::Intrinsic` to an `InstanceDef::Item`.
+        _ => return Err(Instance::new(instance.def_id(), instance.args)),
    }

    let ret_block = fx.get_block(destination.unwrap());
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index eac8cb43779..f162ef831b7 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -90,7 +90,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) ->
}

impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) -> Result<(), Instance<'tcx>> {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

@@ -137,7 +137,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                    args[2].immediate(),
                    llresult,
                );
-                return;
+                return Ok(());
            }
            sym::breakpoint => {
                unimplemented!();
@@ -166,12 +166,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
-                return;
+                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
-                return;
+                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
@@ -269,7 +269,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                    },
                    None => {
                        tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
-                        return;
+                        return Ok(());
                    }
                }
            }
@@ -339,7 +339,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                extended_asm.set_volatile_flag(true);

                // We have copied the value to `result` already.
-                return;
+                return Ok(());
            }

            sym::ptr_mask => {
@@ -357,11 +357,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
            _ if name_str.starts_with("simd_") => {
                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                    Ok(llval) => llval,
-                    Err(()) => return,
+                    Err(()) => return Ok(()),
                }
            }

-            _ => bug!("unknown intrinsic '{}'", name),
+            // Fall back to default body
+            _ => return Err(Instance::new(instance.def_id(), instance.args)),
        };

        if !fn_abi.ret.is_ignore() {
@@ -376,6 +377,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                .store(self, result);
            }
        }
+        Ok(())
    }

    fn abort(&mut self) {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index e3e48ecb3aa..4415c51acf6 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -86,7 +86,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
-    ) {
+    ) -> Result<(), ty::Instance<'tcx>> {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

@@ -141,7 +141,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                    args[2].immediate(),
                    llresult,
                );
-                return;
+                return Ok(());
            }
            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
            sym::va_copy => {
@@ -194,17 +194,17 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                if !result.layout.is_zst() {
                    self.store(load, result.llval, result.align);
                }
-                return;
+                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
-                return;
+                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
-                return;
+                return Ok(());
            }
            sym::prefetch_read_data
            | sym::prefetch_write_data
@@ -305,7 +305,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                            name,
                            ty,
                        });
-                        return;
+                        return Ok(());
                    }
                }
            }
@@ -387,7 +387,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

            // We have copied the value to `result` already.
-            return;
+            return Ok(());
        }

        _ if name.as_str().starts_with("simd_") => {
@@ -395,11 +395,15 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
                ) {
                    Ok(llval) => llval,
-                    Err(()) => return,
+                    Err(()) => return Ok(()),
                }
            }

-            _ => bug!("unknown intrinsic '{}' -- should it have been lowered earlier?", name),
+            _ => {
+                debug!("unknown intrinsic '{}' -- falling back to default body", name);
+                // Call the fallback body instead of generating the intrinsic code
+                return Err(ty::Instance::new(instance.def_id(), instance.args));
+            }
        };

        if !fn_abi.ret.is_ignore() {
@@ -411,6 +415,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                .store(self, result);
            }
        }
+        Ok(())
    }

    fn abort(&mut self) {
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 0c4661ef16e..75d413dedad 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -837,8 +837,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            };
        }

-        match intrinsic {
-            None | Some(sym::drop_in_place) => {}
+        let instance = match intrinsic {
+            None | Some(sym::drop_in_place) => instance,
            Some(intrinsic) => {
                let mut llargs = Vec::with_capacity(1);
                let ret_dest = self.make_return_dest(
@@ -882,27 +882,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    })
                    .collect();

-                Self::codegen_intrinsic_call(
-                    bx,
-                    *instance.as_ref().unwrap(),
-                    fn_abi,
-                    &args,
-                    dest,
-                    span,
-                );
+                let instance = *instance.as_ref().unwrap();
+                match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
+                    Ok(()) => {
+                        if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                            self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                        }

-                if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                        return if let Some(target) = target {
+                            helper.funclet_br(self, bx, target, mergeable_succ)
+                        } else {
+                            bx.unreachable();
+                            MergingSucc::False
+                        };
+                    }
+                    Err(instance) => Some(instance),
                }

-                return if let Some(target) = target {
-                    helper.funclet_br(self, bx, target, mergeable_succ)
-                } else {
-                    bx.unreachable();
-                    MergingSucc::False
-                };
            }
-        }
+        };

        let mut llargs = Vec::with_capacity(arg_count);

        let destination = target.as_ref().map(|&target| {
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8530bf9e2b3..d6a0dc6bbef 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -54,6 +54,7 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// In the `Err` case, returns the instance that should be called instead.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
@@ -61,7 +62,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
-    ) {
+    ) -> Result<(), ty::Instance<'tcx>> {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
@@ -81,7 +82,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        let llval = match name {
            sym::abort => {
                bx.abort();
-                return;
+                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
@@ -150,7 +151,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    args[0].immediate(),
                    args[2].immediate(),
                );
-                return;
+                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
@@ -161,7 +162,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    args[1].immediate(),
                    args[2].immediate(),
                );
-                return;
+                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
@@ -174,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    args[1].immediate(),
                    args[2].immediate(),
                );
-                return;
+                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
@@ -186,7 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    args[1].immediate(),
                    args[2].immediate(),
                );
-                return;
+                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
@@ -197,17 +198,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    args[1].immediate(),
                    args[2].immediate(),
                );
-                return;
+                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
-                return;
+                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
-                return;
+                return Ok(());
            }
            sym::exact_div => {
                let ty = arg_tys[0];
@@ -225,7 +226,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                            name,
                            ty,
                        });
-                        return;
+                        return Ok(());
                    }
                }
            }
@@ -245,7 +246,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                            name,
                            ty: arg_tys[0],
                        });
-                        return;
+                        return Ok(());
                    }
                }
            }
@@ -256,14 +257,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        span,
                        ty: arg_tys[0],
                    });
-                    return;
+                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: ret_ty,
                    });
-                    return;
+                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
@@ -280,14 +281,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                }
            }

-            sym::const_allocate => {
-                // returns a null pointer at runtime.
-                bx.const_null(bx.type_ptr())
-            }
-
            sym::const_deallocate => {
                // nop at runtime.
-                return;
+                return Ok(());
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
@@ -350,10 +346,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
-                            return;
                        } else {
-                            return invalid_monomorphization(ty);
+                            invalid_monomorphization(ty);
                        }
+                        return Ok(());
                    }

                    "load" => {
@@ -383,7 +379,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                )
                            }
                        } else {
-                            return invalid_monomorphization(ty);
+                            invalid_monomorphization(ty);
+                            return Ok(());
                        }
                    }

@@ -399,10 +396,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
-                            return;
                        } else {
-                            return invalid_monomorphization(ty);
+                            invalid_monomorphization(ty);
                        }
+                        return Ok(());
                    }

                    "fence" => {
@@ -410,7 +407,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
-                        return;
+                        return Ok(());
                    }

                    "singlethreadfence" => {
@@ -418,7 +415,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
-                        return;
+                        return Ok(());
                    }

                    // These are all AtomicRMW ops
@@ -449,7 +446,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
-                            return invalid_monomorphization(ty);
+                            invalid_monomorphization(ty);
+                            return Ok(());
                        }
                    }
                }
@@ -458,7 +456,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
-                return;
+                return Ok(());
            }

            sym::ptr_guaranteed_cmp => {
@@ -493,8 +491,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

            _ => {
                // Need to use backend-specific things in the implementation.
-                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
-                return;
+                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
            }
        };

@@ -507,6 +504,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                .store(bx, result);
            }
        }
+        Ok(())
    }
}

diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
index 450672fb941..502f0b3fcb5 100644
--- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -8,6 +8,8 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
    /// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
    /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
    /// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
+    /// Returns `Err` if another instance should be called instead. This is used to invoke
+    /// intrinsic default bodies in case an intrinsic is not implemented by the backend.
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
@@ -15,7 +17,7 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
        args: &[OperandRef<'tcx, Self::Value>],
        llresult: Self::Value,
        span: Span,
-    );
+    ) -> Result<(), ty::Instance<'tcx>>;

    fn abort(&mut self);
    fn assume(&mut self, val: Self::Value);
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index b0cf1d1656d..397112e8db7 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -363,7 +363,7 @@ pub fn check_intrinsic_type(
        ),
        sym::const_allocate => {
-            (0, 0, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
+            (0, 1, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
        }
        sym::const_deallocate => (
            0,
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index 81cf31e6bf4..a9de37244c5 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -296,9 +296,9 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
        if args.is_empty() {
            return;
        }

-        let ty = args.type_at(0);
-        let known_is_valid = intrinsic_assert_panics(self.tcx, self.param_env, ty, intrinsic_name);
+        let known_is_valid =
+            intrinsic_assert_panics(self.tcx, self.param_env, args[0], intrinsic_name);
        match known_is_valid {
            // We don't know the layout or it's not validity assertion at all, don't touch it
            None => {}
@@ -317,10 +317,11 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
fn intrinsic_assert_panics<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
-    ty: Ty<'tcx>,
+    arg: ty::GenericArg<'tcx>,
    intrinsic_name: Symbol,
) -> Option<bool> {
    let requirement = ValidityRequirement::from_intrinsic(intrinsic_name)?;
+    let ty = arg.expect_ty();
    Some(!tcx.check_validity_requirement((requirement, param_env.and(ty))).ok()?)
}

diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 149e4c2cb08..a9926820caf 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -956,19 +956,24 @@ fn visit_instance_use<'tcx>(
    if !should_codegen_locally(tcx, &instance) {
        return;
    }
-
-    // The intrinsics assert_inhabited, assert_zero_valid, and assert_mem_uninitialized_valid will
-    // be lowered in codegen to nothing or a call to panic_nounwind. So if we encounter any
-    // of those intrinsics, we need to include a mono item for panic_nounwind, else we may try to
-    // codegen a call to that function without generating code for the function itself.
    if let ty::InstanceDef::Intrinsic(def_id) = instance.def {
        let name = tcx.item_name(def_id);
        if let Some(_requirement) = ValidityRequirement::from_intrinsic(name) {
+            // The intrinsics assert_inhabited, assert_zero_valid, and assert_mem_uninitialized_valid will
+            // be lowered in codegen to nothing or a call to panic_nounwind. So if we encounter any
+            // of those intrinsics, we need to include a mono item for panic_nounwind, else we may try to
+            // codegen a call to that function without generating code for the function itself.
            let def_id = tcx.lang_items().get(LangItem::PanicNounwind).unwrap();
            let panic_instance = Instance::mono(tcx, def_id);
            if should_codegen_locally(tcx, &panic_instance) {
                output.push(create_fn_mono_item(tcx, panic_instance, source));
            }
+        } else if tcx.has_attr(def_id, sym::rustc_intrinsic) {
+            // Codegen the fallback body of intrinsics with fallback bodies
+            let instance = ty::Instance::new(def_id, instance.args);
+            if should_codegen_locally(tcx, &instance) {
+                output.push(create_fn_mono_item(tcx, instance, source));
+            }
        }
    }
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index d56c3d3b6c1..3a178510e79 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -2368,18 +2368,6 @@ extern "rust-intrinsic" {
    #[rustc_nounwind]
    pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;

-    /// Allocates a block of memory at compile time.
-    /// At runtime, just returns a null pointer.
-    ///
-    /// # Safety
-    ///
-    /// - The `align` argument must be a power of two.
-    /// - At compile time, a compile error occurs if this constraint is violated.
-    /// - At runtime, it is not checked.
-    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
-    #[rustc_nounwind]
-    pub fn const_allocate(size: usize, align: usize) -> *mut u8;
-
    /// Deallocates a memory which allocated by `intrinsics::const_allocate` at compile time.
    /// At runtime, does nothing.
    ///
@@ -2594,6 +2582,22 @@ pub(crate) const unsafe fn debug_assertions() -> bool {
    cfg!(debug_assertions)
}

+/// Allocates a block of memory at compile time.
+/// At runtime, just returns a null pointer.
+///
+/// # Safety
+///
+/// - The `align` argument must be a power of two.
+/// - At compile time, a compile error occurs if this constraint is violated.
+/// - At runtime, it is not checked.
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+#[rustc_nounwind]
+#[cfg_attr(not(bootstrap), rustc_intrinsic)]
+pub const unsafe fn const_allocate(_size: usize, _align: usize) -> *mut u8 {
+    // const eval overrides this function, but runtime code should always just return null pointers.
+    crate::ptr::null_mut()
+}
+
// Some functions are defined here because they accidentally got made
// available in this module on stable. See .
// (`transmute` also falls into this category, but it cannot be wrapped due to the
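The control flow this patch establishes can be summarised apart from the diff: a backend's `codegen_intrinsic_call` either lowers an intrinsic itself and returns `Ok(())`, or hands back an `Err(instance)` so the caller emits an ordinary call to the intrinsic's fallback MIR body. The following is a minimal, self-contained toy model of that dispatch, not compiler code; the `Instance` struct and both function names below are stand-ins for the real `rustc_codegen_ssa` types and hooks.

// Toy model of the Ok/Err dispatch added by this patch.
// All names here are stand-ins, not rustc's real types or functions.

#[derive(Debug, Clone)]
struct Instance {
    name: &'static str,
}

/// Stand-in for the backend hook: `Ok(())` means the backend lowered the
/// intrinsic itself, `Err(instance)` means "emit an ordinary call to this
/// instance (its fallback body) instead".
fn backend_codegen_intrinsic(instance: &Instance) -> Result<(), Instance> {
    match instance.name {
        // Intrinsics this backend knows how to lower directly.
        "abort" | "volatile_store" => Ok(()),
        // Everything else must have a fallback body; hand it back to the caller.
        _ => Err(instance.clone()),
    }
}

/// Stand-in for the caller: on `Err`, fall through to the normal call path
/// with the returned instance.
fn codegen_terminator_call(instance: Instance) {
    match backend_codegen_intrinsic(&instance) {
        Ok(()) => println!("backend lowered `{}` inline", instance.name),
        Err(fallback) => {
            println!("emitting a plain call to the fallback body of `{}`", fallback.name)
        }
    }
}

fn main() {
    codegen_terminator_call(Instance { name: "abort" });
    codegen_terminator_call(Instance { name: "const_allocate" });
}

Run as written, the sketch reports that `abort` is lowered inline while the second call falls back to a plain call, which mirrors what the `rustc_codegen_ssa::mir::block` change above does on `Err` and what the collector change makes possible by emitting a mono item for the fallback body.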